//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGBlocks.h"
|
2016-09-08 02:21:30 +08:00
|
|
|
#include "CGCXXABI.h"
|
2015-04-23 05:38:15 +08:00
|
|
|
#include "CGCleanup.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "CGDebugInfo.h"
|
2011-09-20 05:14:35 +08:00
|
|
|
#include "CGOpenCLRuntime.h"
|
2016-03-04 17:22:22 +08:00
|
|
|
#include "CGOpenMPRuntime.h"
|
2017-05-19 02:51:09 +08:00
|
|
|
#include "CodeGenFunction.h"
|
2012-12-04 17:13:33 +08:00
|
|
|
#include "CodeGenModule.h"
|
2017-08-16 05:42:52 +08:00
|
|
|
#include "ConstantEmitter.h"
|
2019-04-12 08:11:27 +08:00
|
|
|
#include "PatternInit.h"
|
2017-05-19 02:51:09 +08:00
|
|
|
#include "TargetInfo.h"
|
2008-08-11 13:00:27 +08:00
|
|
|
#include "clang/AST/ASTContext.h"
|
2019-12-10 08:11:56 +08:00
|
|
|
#include "clang/AST/Attr.h"
|
2009-12-22 22:23:30 +08:00
|
|
|
#include "clang/AST/CharUnits.h"
|
2008-08-11 13:35:13 +08:00
|
|
|
#include "clang/AST/Decl.h"
|
2008-08-25 09:38:19 +08:00
|
|
|
#include "clang/AST/DeclObjC.h"
|
2016-03-03 13:21:39 +08:00
|
|
|
#include "clang/AST/DeclOpenMP.h"
|
2018-12-11 11:18:39 +08:00
|
|
|
#include "clang/Basic/CodeGenOptions.h"
|
2008-04-19 12:17:09 +08:00
|
|
|
#include "clang/Basic/SourceManager.h"
|
2008-05-08 13:58:21 +08:00
|
|
|
#include "clang/Basic/TargetInfo.h"
|
2013-10-31 05:53:58 +08:00
|
|
|
#include "clang/CodeGen/CGFunctionInfo.h"
|
2018-09-21 21:54:09 +08:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2013-01-02 19:45:17 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
2016-02-11 03:11:58 +08:00
|
|
|
|
2007-06-02 12:16:21 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
2007-06-09 09:20:56 +08:00
|
|
|

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::ClassScopeFunctionSpecialization:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Function:     // void X();
  case Decl::Record:       // struct/union/class X;
  case Decl::Enum:         // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord:    // struct/union/class X; [C++]
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::LifetimeExtendedTemporary:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);

    return;
  }
  }
}
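
// Illustrative example (added for exposition, not part of the original
// source): for a body such as
//
//   void f(unsigned n) {
//     typedef int VLA[n]; // Decl::Typedef with a variably modified type
//     static int s = 0;   // Decl::Var with static storage duration
//     int x = 0;          // Decl::Var with automatic storage duration
//   }
//
// EmitDecl evaluates the VLA bound 'n' for the typedef via
// EmitVariablyModifiedType and routes both variables to EmitVarDecl, which
// then distinguishes static from automatic storage.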

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still need
  // to be emitted like static variables, e.g. a function-scope variable in
  // the constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.
    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}
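
// Dispatch summary (illustrative, with assumed example declarations):
//   extern int e;    -> skipped; emitted lazily on first use
//   static int s;    -> EmitStaticVarDecl (non-automatic storage duration)
//   __local int l;   -> OpenCL local address space: EmitWorkGroupLocalVarDecl
//   int a;           -> EmitAutoVarDecl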

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = CGM.getMangledName(FD);
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}
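
// Example (illustrative): in C, a static local 'x' in a function 'foo' is
// named "foo.x" (built from foo's mangled name); inside a block the block
// mangling is used, and inside an Objective-C method the selector string.
// In C++ the variable's own mangled name is returned directly.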

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = getMangledName(&D);
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}
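
// Usage note (illustrative): because results are cached in
// StaticLocalDeclMap, emitting both the complete and base variants of a
// constructor containing the same static local reuses one global, and a
// reference to the static local can safely precede emission of its
// enclosing function.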

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(
        CGM.getModule(), Init->getType(), OldGV->isConstant(),
        OldGV->getLinkage(), Init, "",
        /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
        CGM.getContext().getTargetAddressSpace(D.getType()));
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global.
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global.
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setConstant(CGM.isTypeConstant(D.getType(), true));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (D.needsDestruction(getContext()) && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}
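
// Example of the type-rewrite path above (illustrative, assuming a typical
// union lowering): for
//
//   union U { int i; float f; };
//   static union U u = { .f = 1.0f };
//
// the global is first created with the generic memory type chosen for 'U',
// while the constant initializer is laid out for the active member 'f'; the
// type mismatch forces the global to be recreated with the initializer's
// type and all old uses rewritten through a bitcast.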

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  setAddrOfLocalVar(&D, Address(addr, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such a variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.addUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV, uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  if (var != castedAddr)
    LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}
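
// Illustrative effect (assumed example): under
//   #pragma clang section bss = "my_bss"
// an uninitialized static local's global gains a "bss-section" attribute,
// while an explicit __attribute__((section("name"))) on the declaration
// sets the section on the global directly.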

namespace {
struct DestroyObject final : EHScopeStack::Cleanup {
  DestroyObject(Address addr, QualType type,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

  Address addr;
  QualType type;
  CodeGenFunction::Destroyer *destroyer;
  bool useEHCleanupForArray;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Don't use an EH cleanup recursively from an EH cleanup.
    bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

    CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
  }
};
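
// Usage sketch (illustrative; mirrors the usual cleanup-push pattern):
//   CGF.EHStack.pushCleanup<DestroyObject>(NormalAndEHCleanup, addr, type,
//                                          destroyer, useEHCleanupForArray);
// registers destruction of 'addr' on both the normal and exceptional exit
// paths; Emit() then avoids requesting a nested EH array cleanup when it is
// already running on the EH path.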

template <class Derived>
struct DestroyNRVOVariable : EHScopeStack::Cleanup {
  DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
      : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

  llvm::Value *NRVOFlag;
  Address Loc;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Along the exceptions path we always execute the dtor.
    bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

    llvm::BasicBlock *SkipDtorBB = nullptr;
    if (NRVO) {
      // If we exited via NRVO, we skip the destructor call.
      llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
      SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
llvm::Value *DidNRVO =
|
|
|
|
CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
|
2010-07-14 04:32:21 +08:00
|
|
|
CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
|
|
|
|
CGF.EmitBlock(RunDtorBB);
|
|
|
|
}
|
2011-08-24 06:38:00 +08:00
|
|
|
|
2018-03-30 01:56:24 +08:00
|
|
|
static_cast<Derived *>(this)->emitDestructorCall(CGF);
|
|
|
|
|
|
|
|
if (NRVO) CGF.EmitBlock(SkipDtorBB);
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual ~DestroyNRVOVariable() = default;
|
|
|
|
};
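
  // As an illustrative sketch: for a function like
  //   X f() { X x; ...; return x; }
  // the cleanup above loads the NRVO flag and branches, roughly:
  //   %nrvo.val = load i1, i1* %nrvo.flag
  //   br i1 %nrvo.val, label %nrvo.skipdtor, label %nrvo.unused
  // so ~X() runs only when the object was not constructed directly in the
  // return slot.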

  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, QualType type,
                           const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
        : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
          Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc, Ty);
    }
  };

  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
      CGF.Builder.CreateCall(F, V);
    }
  };
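
  // Schematically, this cleanup pairs with an llvm.stacksave emitted on entry
  // to a scope containing a VLA, e.g. for
  //   { int vla[n]; ... }
  // we emit roughly:
  //   %sp = call i8* @llvm.stacksave()
  //   %vla = alloca i32, i64 %n
  //   ...
  //   call void @llvm.stackrestore(i8* %sp)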

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
        : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

      // In some cases, the type of the function argument will be different
      // from the type of the pointer. An example of this is
      //   void f(void* arg);
      //   __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
          CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
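
  // At scope exit this emits, roughly, a call f(&g) for the example above:
  // the cleanup function is always passed the address of the annotated
  // variable, bitcast to its expected parameter type if necessary.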

} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}
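
// For example, a local
//   __strong id x = ...;
// gets an objc_release of its value at scope exit, while
//   __weak id w = ...;
// gets objc_destroyWeak, registered on both the normal and the EH path.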

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. for a missing decl or the condition of an
    // if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}
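
// As an illustrative example, for
//   __block void (^recur)(int) = ^(int n) { if (n) recur(n - 1); };
// the initializer captures 'recur' itself, so isAccessedBy returns true and
// the variable must be observably zero-initialized before the initializer is
// evaluated.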

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress(CGF);
      if (needsCast) {
        srcAddr = CGF.Builder.CreateElementBitCast(
            srcAddr, destLV.getAddress(CGF).getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->getValueKind() == VK_LValue) {
        CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
      } else {
        assert(srcExpr->getValueKind() == VK_XValue);
        CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}
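
// e.g. given
//   __weak id a = ...;
//   __weak id b = a;        // lowered to objc_copyWeak(&b, &a)
// and, when the source is an xvalue (e.g. std::move(a) in ObjC++), the
// initialization is lowered to objc_moveWeak instead.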

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability(getContext());
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // If the left hand side must be nonnull, check that the right hand side of
  // the assignment is in fact nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}
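
// e.g. under -fsanitize=nullability-assign, for
//   int *_Nonnull p;
//   p = q;
// a runtime check that 'q' is nonnull is emitted before the store.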

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
    enterFullExpression(fe);
    init = fe->getSubExpr();
  }
  CodeGenFunction::RunCleanupsScope Scope(*this);

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty =
        cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(*this), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means that we omit the retain, which causes non-autoreleased return
    // values to be released immediately.
    LLVM_FALLTHROUGH;
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(*this), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
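
// As a sketch of the accessed-by-initializer path above: for
//   __block id x = ^{ return x; }();
// 'x' is zero-initialized first, so the block reads nil rather than garbage,
// and the initializer's result is then stored with an assigning store.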

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}
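
// e.g. for
//   char str[10000] = "abc";
// only the three leading bytes are non-zero, so with the default budget of 6
// scalar stores this returns true and a bzero-plus-stores strategy is used.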

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    Builder.CreateStore(Init, Loc, isVolatile);
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder);
  }
}
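
// For the str example above, we emit roughly:
//   %tmp = getelementptr inbounds [10000 x i8]* %str, i64 0, i64 0
//   call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 10000, i32 16, i1 false)
//   store i8 97, i8* %tmp, align 16
//   store i8 98, ...
//   store i8 99, ...
// which is much smaller (and likely faster) than a 10K constant global plus
// a memcpy from it.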

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
  // only use bzero if the remainder will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided to
/// not use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
/// memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}
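
// e.g. for
//   long big[1024] = { -1, -1, /* ... */ -1 };
// every byte of the initializer is 0xFF, so isBytewiseValue returns that byte
// and the whole variable can be initialized with a single memset.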

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}
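
// e.g. for a constant of type
//   struct S { char c; int i; };   // 3 bytes of padding after 'c'
// this returns an anonymous struct constant laid out as
//   { i8, [3 x i8], i32 }
// with the [3 x i8] filled by the pattern (or zeroes), so the padding bytes
// are initialized as well.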

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *STy = dyn_cast<llvm::SequentialType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    unsigned Size = STy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = STy->getElementType();
    bool ZeroInitializer = constant->isZeroValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    if (OrigTy->isArrayTy()) {
      auto *ArrayTy = llvm::ArrayType::get(NewElemTy, Size);
      return llvm::ConstantArray::get(ArrayTy, Values);
    } else {
      return llvm::ConstantVector::get(Values);
    }
  }
  return constant;
}

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return getMangledName(FD);
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(getStringLiteralAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < Align.getQuantity()) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, Align);
}
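
// e.g. a local
//   int arr[3] = {1, 2, 3};
// in a C function f() yields a global along the lines of
//   @__const.f.arr = private unnamed_addr constant [3 x i32] [i32 1, i32 2, i32 3]
// which a later memcpy-based initialization can copy from.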

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
                                                   SrcPtr.getAddressSpace());
  if (SrcPtr.getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
  return SrcPtr;
}
|
|
|
|
|
2018-08-08 05:55:13 +08:00
|
|
|
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
|
|
|
|
Address Loc, bool isVolatile,
|
|
|
|
CGBuilderTy &Builder,
|
|
|
|
llvm::Constant *constant) {
|
Automatic variable initialization
Summary:
Add an option to initialize automatic variables with either a pattern or with
zeroes. The default is still that automatic variables are uninitialized. Also
add attributes to request uninitialized on a per-variable basis, mainly to disable
initialization of large stack arrays when deemed too expensive.
This isn't meant to change the semantics of C and C++. Rather, it's meant to be
a last-resort when programmers inadvertently have some undefined behavior in
their code. This patch aims to make undefined behavior hurt less, which
security-minded people will be very happy about. Notably, this means that
there's no inadvertent information leak when:
- The compiler re-uses stack slots, and a value is used uninitialized.
- The compiler re-uses a register, and a value is used uninitialized.
- Stack structs / arrays / unions with padding are copied.
This patch only addresses stack and register information leaks. There's many
more infoleaks that we could address, and much more undefined behavior that
could be tamed. Let's keep this patch focused, and I'm happy to address related
issues elsewhere.
To keep the patch simple, only some `undef` is removed for now, see
`replaceUndef`. The padding-related infoleaks are therefore not all gone yet.
This will be addressed in a follow-up, mainly because addressing padding-related
leaks should be a stand-alone option which is implied by variable
initialization.
There are three options when it comes to automatic variable initialization:
0. Uninitialized
This is C and C++'s default. It's not changing. Depending on code
generation, a programmer who runs into undefined behavior by using an
uninialized automatic variable may observe any previous value (including
program secrets), or any value which the compiler saw fit to materialize on
the stack or in a register (this could be to synthesize an immediate, to
refer to code or data locations, to generate cookies, etc).
1. Pattern initialization
This is the recommended initialization approach. Pattern initialization's
goal is to initialize automatic variables with values which will likely
transform logic bugs into crashes down the line, are easily recognizable in
a crash dump, without being values which programmers can rely on for useful
program semantics. At the same time, pattern initialization tries to
generate code which will optimize well. You'll find the following details in
`patternFor`:
- Integers are initialized with repeated 0xAA bytes (infinite scream).
- Vectors of integers are also initialized with infinite scream.
- Pointers are initialized with infinite scream on 64-bit platforms because
it's an unmappable pointer value on architectures I'm aware of. Pointers
are initialize to 0x000000AA (small scream) on 32-bit platforms because
32-bit platforms don't consistently offer unmappable pages. When they do
it's usually the zero page. As people try this out, I expect that we'll
want to allow different platforms to customize this, let's do so later.
- Vectors of pointers are initialized the same way pointers are.
- Floating point values and vectors are initialized with a negative quiet
NaN with repeated 0xFF payload (e.g. 0xffffffff and 0xffffffffffffffff).
NaNs are nice (here, anways) because they propagate on arithmetic, making
it more likely that entire computations become NaN when a single
uninitialized value sneaks in.
- Arrays are initialized to their homogeneous elements' initialization
value, repeated. Stack-based Variable-Length Arrays (VLAs) are
runtime-initialized to the allocated size (no effort is made for negative
size, but zero-sized VLAs are untouched even if technically undefined).
- Structs are initialized to their heterogeneous element's initialization
values. Zero-size structs are initialized as 0xAA since they're allocated
a single byte.
- Unions are initialized using the initialization for the largest member of
the union.
Expect the values used for pattern initialization to change over time, as we
refine heuristics (both for performance and security). The goal is truly to
avoid injecting semantics into undefined behavior, and we should be
comfortable changing these values when there's a worthwhile point in doing
so.
Why so much infinite scream? Repeated byte patterns tend to be easy to
synthesize on most architectures, and otherwise memset is usually very
efficient. For values which aren't entirely repeated byte patterns, LLVM
will often generate code which does memset + a few stores.
2. Zero initialization
Zero initialize all values. This has the unfortunate side-effect of
providing semantics to otherwise undefined behavior, programs therefore
might start to rely on this behavior, and that's sad. However, some
programmers believe that pattern initialization is too expensive for them,
and data might show that they're right. The only way to make these
programmers wrong is to offer zero-initialization as an option, figure out
where they are right, and optimize the compiler into submission. Until the
compiler provides acceptable performance for all security-minded code, zero
initialization is a useful (if blunt) tool.
I've been asked for a fourth initialization option: user-provided byte value.
This might be useful, and can easily be added later.
Why is an out-of band initialization mecanism desired? We could instead use
-Wuninitialized! Indeed we could, but then we're forcing the programmer to
provide semantics for something which doesn't actually have any (it's
uninitialized!). It's then unclear whether `int derp = 0;` lends meaning to `0`,
or whether it's just there to shut that warning up. It's also way easier to use
a compiler flag than it is to manually and intelligently initialize all values
in a program.
Why not just rely on static analysis? Because it cannot reason about all dynamic
code paths effectively, and it has false positives. It's a great tool, could get
even better, but it's simply incapable of catching all uses of uninitialized
values.
Why not just rely on memory sanitizer? Because it's not universally available,
has a 3x performance cost, and shouldn't be deployed in production. Again, it's
a great tool, it'll find the dynamic uses of uninitialized variables that your
test coverage hits, but it won't find the ones that you encounter in production.
What's the performance like? Not too bad! Previous publications [0] have cited
2.7 to 4.5% averages. We've commmitted a few patches over the last few months to
address specific regressions, both in code size and performance. In all cases,
the optimizations are generally useful, but variable initialization benefits
from them a lot more than regular code does. We've got a handful of other
optimizations in mind, but the code is in good enough shape and has found enough
latent issues that it's a good time to get the change reviewed, checked in, and
have others kick the tires. We'll continue reducing overheads as we try this out
on diverse codebases.
Is it a good idea? Security-minded folks think so, and apparently so does the
Microsoft Visual Studio team [1] who say "Between 2017 and mid 2018, this
feature would have killed 49 MSRC cases that involved uninitialized struct data
leaking across a trust boundary. It would have also mitigated a number of bugs
involving uninitialized struct data being used directly.". They seem to use pure
zero initialization, and claim to have taken the overheads down to within noise.
Don't just trust Microsoft though, here's another relevant person asking for
this [2]. It's been proposed for GCC [3] and LLVM [4] before.
What are the caveats? A few!
- Variables declared in unreachable code, and used later, aren't initialized.
This goto, Duff's device, other objectionable uses of switch. This should
instead be a hard-error in any serious codebase.
- Volatile stack variables are still weird. That's pre-existing, it's really
the language's fault and this patch keeps it weird. We should deprecate
volatile [5].
- As noted above, padding isn't fully handled yet.
I don't think these caveats make the patch untenable because they can be
addressed separately.
Should this be on by default? Maybe, in some circumstances. It's a conversation
we can have when we've tried it out sufficiently, and we're confident that we've
eliminated enough of the overheads that most codebases would want to opt-in.
Let's keep our precious undefined behavior until that point in time.
How do I use it:
1. On the command-line:
-ftrivial-auto-var-init=uninitialized (the default)
-ftrivial-auto-var-init=pattern
-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
2. Using an attribute:
int dont_initialize_me __attribute((uninitialized));
[0]: https://users.elis.ugent.be/~jsartor/researchDocs/OOPSLA2011Zero-submit.pdf
[1]: https://twitter.com/JosephBialek/status/1062774315098112001
[2]: https://outflux.net/slides/2018/lss/danger.pdf
[3]: https://gcc.gnu.org/ml/gcc-patches/2014-06/msg00615.html
[4]: https://github.com/AndroidHardeningArchive/platform_external_clang/commit/776a0955ef6686d23a82d2e6a3cbd4a6a882c31c
[5]: http://wg21.link/p1152
I've also posted an RFC to cfe-dev: http://lists.llvm.org/pipermail/cfe-dev/2018-November/060172.html
<rdar://problem/39131435>
Reviewers: pcc, kcc, rsmith
Subscribers: JDevlieghere, jkorous, dexonsmith, cfe-commits
Differential Revision: https://reviews.llvm.org/D54604
llvm-svn: 349442
2018-12-18 13:12:21 +08:00
|
|
|
auto *Ty = constant->getType();
|
2019-06-27 10:08:15 +08:00
|
|
|
uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
|
|
|
|
if (!ConstantSize)
|
|
|
|
return;
|
|
|
|
|
2019-03-08 09:26:49 +08:00
|
|
|
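// Integers, pointers, and floating-point values (and vectors thereof) can be
// initialized with a single store.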
bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
|
|
|
|
Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
|
|
|
|
if (canDoSingleStore) {
|
2018-12-18 13:12:21 +08:00
|
|
|
Builder.CreateStore(constant, Loc, isVolatile);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-06-27 09:34:21 +08:00
|
|
|
auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
|
2019-03-08 09:26:49 +08:00
|
|
|
|
|
|
|
// If the initializer is all or mostly the same, codegen with bzero / memset
|
|
|
|
// then do a few stores afterward.
|
2018-08-08 05:55:13 +08:00
|
|
|
if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
|
2019-06-27 09:34:21 +08:00
|
|
|
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0), SizeVal,
|
2018-08-08 05:55:13 +08:00
|
|
|
isVolatile);
|
|
|
|
|
|
|
|
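// After the bzero, the remaining non-zero pieces still need to be stored;
// skip that when the constant is already all zeros or undef.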
bool valueAlreadyCorrect =
|
|
|
|
constant->isNullValue() || isa<llvm::UndefValue>(constant);
|
|
|
|
if (!valueAlreadyCorrect) {
|
2018-12-18 13:12:21 +08:00
|
|
|
Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
|
2018-08-08 05:55:13 +08:00
|
|
|
emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-03-08 09:26:49 +08:00
|
|
|
// If the initializer is a repeated byte pattern, use memset.
|
2019-07-11 06:53:52 +08:00
|
|
|
llvm::Value *Pattern =
|
|
|
|
shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
|
2018-09-21 21:54:09 +08:00
|
|
|
if (Pattern) {
|
|
|
|
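// Extract the single repeated byte from the pattern; an undef pattern
// simply memsets to zero.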
uint64_t Value = 0x00;
|
|
|
|
if (!isa<llvm::UndefValue>(Pattern)) {
|
|
|
|
const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
|
|
|
|
assert(AP.getBitWidth() <= 8);
|
|
|
|
Value = AP.getLimitedValue();
|
|
|
|
}
|
2019-06-27 09:34:21 +08:00
|
|
|
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal,
|
2018-08-08 05:55:13 +08:00
|
|
|
isVolatile);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-03-08 09:26:49 +08:00
|
|
|
// If the initializer is small, use a handful of stores.
|
|
|
|
if (shouldSplitConstantStore(CGM, ConstantSize)) {
|
|
|
|
if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
|
|
|
|
// FIXME: handle the case when STy != Loc.getElementType().
|
|
|
|
if (STy == Loc.getElementType()) {
|
|
|
|
for (unsigned i = 0; i != constant->getNumOperands(); i++) {
|
|
|
|
Address EltPtr = Builder.CreateStructGEP(Loc, i);
|
|
|
|
emitStoresForConstant(
|
|
|
|
CGM, D, EltPtr, isVolatile, Builder,
|
|
|
|
cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
} else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
|
|
|
|
// FIXME: handle the case when ATy != Loc.getElementType().
|
|
|
|
if (ATy == Loc.getElementType()) {
|
|
|
|
for (unsigned i = 0; i != ATy->getNumElements(); i++) {
|
|
|
|
Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
|
|
|
|
emitStoresForConstant(
|
|
|
|
CGM, D, EltPtr, isVolatile, Builder,
|
|
|
|
cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2019-03-01 17:00:41 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-08 09:26:49 +08:00
|
|
|
// Copy from a global.
|
2019-06-15 01:46:37 +08:00
|
|
|
Builder.CreateMemCpy(Loc,
|
|
|
|
createUnnamedGlobalForMemcpyFrom(
|
|
|
|
CGM, D, Builder, constant, Loc.getAlignment()),
|
|
|
|
SizeVal, isVolatile);
|
2018-08-08 05:55:13 +08:00
|
|
|
}
|
|
|
|
|
2018-12-18 13:12:21 +08:00
|
|
|
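// Emit stores that zero-initialize the object at Loc, with padding made
// explicit via constWithPadding.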
static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
|
|
|
|
Address Loc, bool isVolatile,
|
|
|
|
CGBuilderTy &Builder) {
|
|
|
|
llvm::Type *ElTy = Loc.getElementType();
|
2019-02-26 18:46:21 +08:00
|
|
|
llvm::Constant *constant =
|
|
|
|
constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
|
2018-12-18 13:12:21 +08:00
|
|
|
emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
|
|
|
|
}
|
|
|
|
|
|
|
|
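// Emit stores that fill the object at Loc with the platform's initialization
// pattern, with padding made explicit via constWithPadding.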
static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
|
|
|
|
Address Loc, bool isVolatile,
|
|
|
|
CGBuilderTy &Builder) {
|
|
|
|
llvm::Type *ElTy = Loc.getElementType();
|
2019-04-12 08:11:27 +08:00
|
|
|
llvm::Constant *constant = constWithPadding(
|
|
|
|
CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
|
2018-12-18 13:12:21 +08:00
|
|
|
assert(!isa<llvm::UndefValue>(constant));
|
|
|
|
emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
|
|
|
|
}
|
|
|
|
|
|
|
|
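// Returns true if the constant, or any element of an aggregate constant,
// is undef.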
static bool containsUndef(llvm::Constant *constant) {
|
|
|
|
auto *Ty = constant->getType();
|
|
|
|
if (isa<llvm::UndefValue>(constant))
|
|
|
|
return true;
|
|
|
|
if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
|
|
|
|
for (llvm::Use &Op : constant->operands())
|
|
|
|
if (containsUndef(cast<llvm::Constant>(Op)))
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-02-26 18:46:21 +08:00
|
|
|
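// Replace undef (sub-)values within the constant by the corresponding zero
// or pattern initializer, depending on isPattern.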
static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
|
|
|
|
llvm::Constant *constant) {
|
Automatic variable initialization
Summary:
Add an option to initialize automatic variables with either a pattern or with
zeroes. The default is still that automatic variables are uninitialized. Also
add attributes to request uninitialized on a per-variable basis, mainly to disable
initialization of large stack arrays when deemed too expensive.
This isn't meant to change the semantics of C and C++. Rather, it's meant to be
a last-resort when programmers inadvertently have some undefined behavior in
their code. This patch aims to make undefined behavior hurt less, which
security-minded people will be very happy about. Notably, this means that
there's no inadvertent information leak when:
- The compiler re-uses stack slots, and a value is used uninitialized.
- The compiler re-uses a register, and a value is used uninitialized.
- Stack structs / arrays / unions with padding are copied.
This patch only addresses stack and register information leaks. There's many
more infoleaks that we could address, and much more undefined behavior that
could be tamed. Let's keep this patch focused, and I'm happy to address related
issues elsewhere.
To keep the patch simple, only some `undef` is removed for now, see
`replaceUndef`. The padding-related infoleaks are therefore not all gone yet.
This will be addressed in a follow-up, mainly because addressing padding-related
leaks should be a stand-alone option which is implied by variable
initialization.
There are three options when it comes to automatic variable initialization:
0. Uninitialized
This is C and C++'s default. It's not changing. Depending on code
generation, a programmer who runs into undefined behavior by using an
uninialized automatic variable may observe any previous value (including
program secrets), or any value which the compiler saw fit to materialize on
the stack or in a register (this could be to synthesize an immediate, to
refer to code or data locations, to generate cookies, etc).
1. Pattern initialization
This is the recommended initialization approach. Pattern initialization's
goal is to initialize automatic variables with values which will likely
transform logic bugs into crashes down the line, are easily recognizable in
a crash dump, without being values which programmers can rely on for useful
program semantics. At the same time, pattern initialization tries to
generate code which will optimize well. You'll find the following details in
`patternFor`:
- Integers are initialized with repeated 0xAA bytes (infinite scream).
- Vectors of integers are also initialized with infinite scream.
- Pointers are initialized with infinite scream on 64-bit platforms because
it's an unmappable pointer value on architectures I'm aware of. Pointers
are initialize to 0x000000AA (small scream) on 32-bit platforms because
32-bit platforms don't consistently offer unmappable pages. When they do
it's usually the zero page. As people try this out, I expect that we'll
want to allow different platforms to customize this, let's do so later.
- Vectors of pointers are initialized the same way pointers are.
- Floating point values and vectors are initialized with a negative quiet
NaN with repeated 0xFF payload (e.g. 0xffffffff and 0xffffffffffffffff).
NaNs are nice (here, anways) because they propagate on arithmetic, making
it more likely that entire computations become NaN when a single
uninitialized value sneaks in.
- Arrays are initialized to their homogeneous elements' initialization
value, repeated. Stack-based Variable-Length Arrays (VLAs) are
runtime-initialized to the allocated size (no effort is made for negative
size, but zero-sized VLAs are untouched even if technically undefined).
- Structs are initialized to their heterogeneous element's initialization
values. Zero-size structs are initialized as 0xAA since they're allocated
a single byte.
- Unions are initialized using the initialization for the largest member of
the union.
Expect the values used for pattern initialization to change over time, as we
refine heuristics (both for performance and security). The goal is truly to
avoid injecting semantics into undefined behavior, and we should be
comfortable changing these values when there's a worthwhile point in doing
so.
Why so much infinite scream? Repeated byte patterns tend to be easy to
synthesize on most architectures, and otherwise memset is usually very
efficient. For values which aren't entirely repeated byte patterns, LLVM
will often generate code which does memset + a few stores.
2. Zero initialization
Zero initialize all values. This has the unfortunate side-effect of
providing semantics to otherwise undefined behavior; programs therefore
might start to rely on this behavior, and that's sad. However, some
programmers believe that pattern initialization is too expensive for them,
and data might show that they're right. The only way to make these
programmers wrong is to offer zero-initialization as an option, figure out
where they are right, and optimize the compiler into submission. Until the
compiler provides acceptable performance for all security-minded code, zero
initialization is a useful (if blunt) tool.
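To make the modes above concrete, here's a hedged sketch (the exact pattern
bytes are illustrative and, as noted, subject to change):
  void example(void) {
    int i;    // uninitialized: anything | pattern: 0xAAAAAAAA | zero: 0
    int *p;   // pattern: 0xAAAAAAAAAAAAAAAA on 64-bit, 0x000000AA on 32-bit
    float f;  // pattern: 0xFFFFFFFF (a negative quiet NaN) | zero: +0.0f
    int a[4]; // pattern: every element 0xAAAAAAAA (a memset-friendly fill)
  }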
I've been asked for a fourth initialization option: user-provided byte value.
This might be useful, and can easily be added later.
Why is an out-of-band initialization mechanism desired? We could instead use
-Wuninitialized! Indeed we could, but then we're forcing the programmer to
provide semantics for something which doesn't actually have any (it's
uninitialized!). It's then unclear whether `int derp = 0;` lends meaning to `0`,
or whether it's just there to shut that warning up. It's also way easier to use
a compiler flag than it is to manually and intelligently initialize all values
in a program.
Why not just rely on static analysis? Because it cannot reason about all dynamic
code paths effectively, and it has false positives. It's a great tool, could get
even better, but it's simply incapable of catching all uses of uninitialized
values.
Why not just rely on memory sanitizer? Because it's not universally available,
has a 3x performance cost, and shouldn't be deployed in production. Again, it's
a great tool, it'll find the dynamic uses of uninitialized variables that your
test coverage hits, but it won't find the ones that you encounter in production.
What's the performance like? Not too bad! Previous publications [0] have cited
2.7 to 4.5% averages. We've committed a few patches over the last few months to
address specific regressions, both in code size and performance. In all cases,
the optimizations are generally useful, but variable initialization benefits
from them a lot more than regular code does. We've got a handful of other
optimizations in mind, but the code is in good enough shape and has found enough
latent issues that it's a good time to get the change reviewed, checked in, and
have others kick the tires. We'll continue reducing overheads as we try this out
on diverse codebases.
Is it a good idea? Security-minded folks think so, and apparently so does the
Microsoft Visual Studio team [1] who say "Between 2017 and mid 2018, this
feature would have killed 49 MSRC cases that involved uninitialized struct data
leaking across a trust boundary. It would have also mitigated a number of bugs
involving uninitialized struct data being used directly." They seem to use pure
zero initialization, and claim to have taken the overheads down to within noise.
Don't just trust Microsoft though, here's another relevant person asking for
this [2]. It's been proposed for GCC [3] and LLVM [4] before.
What are the caveats? A few!
- Variables declared in unreachable code, and used later, aren't initialized.
Think goto, Duff's device, and other objectionable uses of switch. This should
instead be a hard-error in any serious codebase.
- Volatile stack variables are still weird. That's pre-existing, it's really
the language's fault and this patch keeps it weird. We should deprecate
volatile [5].
- As noted above, padding isn't fully handled yet.
I don't think these caveats make the patch untenable because they can be
addressed separately.
Should this be on by default? Maybe, in some circumstances. It's a conversation
we can have when we've tried it out sufficiently, and we're confident that we've
eliminated enough of the overheads that most codebases would want to opt-in.
Let's keep our precious undefined behavior until that point in time.
How do I use it:
1. On the command-line:
-ftrivial-auto-var-init=uninitialized (the default)
-ftrivial-auto-var-init=pattern
-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
2. Using an attribute:
int dont_initialize_me __attribute((uninitialized));
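As a hedged end-to-end sketch (the file and function names here are made up;
only the flag and attribute spellings come from this patch):
  // big_buffer.cc (hypothetical)
  void fill(char *);
  void process(void) {
    // Opt this one large array out of automatic initialization because
    // pattern-filling 4096 bytes on every call was deemed too expensive.
    char scratch[4096] __attribute((uninitialized));
    fill(scratch);
  }
  // Build with: clang -ftrivial-auto-var-init=pattern -c big_buffer.cc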
[0]: https://users.elis.ugent.be/~jsartor/researchDocs/OOPSLA2011Zero-submit.pdf
[1]: https://twitter.com/JosephBialek/status/1062774315098112001
[2]: https://outflux.net/slides/2018/lss/danger.pdf
[3]: https://gcc.gnu.org/ml/gcc-patches/2014-06/msg00615.html
[4]: https://github.com/AndroidHardeningArchive/platform_external_clang/commit/776a0955ef6686d23a82d2e6a3cbd4a6a882c31c
[5]: http://wg21.link/p1152
I've also posted an RFC to cfe-dev: http://lists.llvm.org/pipermail/cfe-dev/2018-November/060172.html
<rdar://problem/39131435>
Reviewers: pcc, kcc, rsmith
Subscribers: JDevlieghere, jkorous, dexonsmith, cfe-commits
Differential Revision: https://reviews.llvm.org/D54604
llvm-svn: 349442
2018-12-18 13:12:21 +08:00
|
|
|
auto *Ty = constant->getType();
|
|
|
|
if (isa<llvm::UndefValue>(constant))
|
2019-02-26 18:46:21 +08:00
|
|
|
return patternOrZeroFor(CGM, isPattern, Ty);
|
2018-12-18 13:12:21 +08:00
|
|
|
if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
|
|
|
|
return constant;
|
|
|
|
if (!containsUndef(constant))
|
|
|
|
return constant;
|
|
|
|
llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
|
|
|
|
for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
|
|
|
|
auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
|
2019-02-26 18:46:21 +08:00
|
|
|
Values[Op] = replaceUndef(CGM, isPattern, OpValue);
|
2018-12-18 13:12:21 +08:00
|
|
|
}
|
|
|
|
if (Ty->isStructTy())
|
|
|
|
return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
|
|
|
|
if (Ty->isArrayTy())
|
|
|
|
return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
|
|
|
|
assert(Ty->isVectorTy());
|
|
|
|
return llvm::ConstantVector::get(Values);
|
|
|
|
}
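For readability, here is the routine above assembled without the annotation
markers (a straight transcription of the lines shown; the signature is inferred
from the recursive call and may not match the actual source exactly):
  static llvm::Constant *replaceUndef(CodeGenModule &CGM, bool isPattern,
                                      llvm::Constant *constant) {
    auto *Ty = constant->getType();
    // A plain undef: substitute the pattern or zero value for this type.
    if (isa<llvm::UndefValue>(constant))
      return patternOrZeroFor(CGM, isPattern, Ty);
    // Non-aggregate constants other than undef are already fully defined.
    if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
      return constant;
    // Aggregates without any undef leaves can be reused as-is.
    if (!containsUndef(constant))
      return constant;
    // Otherwise rebuild the aggregate, recursing into each operand so that
    // nested undef leaves are replaced too.
    llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
    for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
      auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
      Values[Op] = replaceUndef(CGM, isPattern, OpValue);
    }
    if (Ty->isStructTy())
      return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
    if (Ty->isArrayTy())
      return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
    assert(Ty->isVectorTy());
    return llvm::ConstantVector::get(Values);
  }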
|
|
|
|
|
2010-12-31 04:21:55 +08:00
|
|
|
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
|
2007-06-02 12:53:11 +08:00
|
|
|
/// variable declaration with auto, register, or no storage class specifier.
|
2008-05-08 13:58:21 +08:00
|
|
|
/// These turn into simple stack objects, or GlobalValues depending on target.
|
2011-02-22 14:44:22 +08:00
|
|
|
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
|
|
|
|
AutoVarEmission emission = EmitAutoVarAlloca(D);
|
|
|
|
EmitAutoVarInit(emission);
|
|
|
|
EmitAutoVarCleanups(emission);
|
|
|
|
}
|
|
|
|
|
2015-04-23 05:38:15 +08:00
|
|
|
/// Emit a lifetime.begin marker if some criteria are satisfied.
|
|
|
|
/// \return a pointer to the temporary size Value if a marker was emitted, null
|
|
|
|
/// otherwise
|
|
|
|
llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
|
|
|
|
llvm::Value *Addr) {
|
2016-10-26 09:59:57 +08:00
|
|
|
if (!ShouldEmitLifetimeMarkers)
|
2015-04-24 02:07:13 +08:00
|
|
|
return nullptr;
|
2015-04-23 05:38:15 +08:00
|
|
|
|
2018-05-17 19:16:35 +08:00
|
|
|
assert(Addr->getType()->getPointerAddressSpace() ==
|
|
|
|
CGM.getDataLayout().getAllocaAddrSpace() &&
|
|
|
|
"Pointer should be in alloca address space");
|
2015-04-23 05:38:15 +08:00
|
|
|
llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
|
2017-04-18 04:03:11 +08:00
|
|
|
Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
|
2015-06-13 06:31:32 +08:00
|
|
|
llvm::CallInst *C =
|
|
|
|
Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
|
2015-04-23 05:38:15 +08:00
|
|
|
C->setDoesNotThrow();
|
|
|
|
return SizeV;
|
|
|
|
}
|
|
|
|
|
|
|
|
void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
|
2018-05-17 19:16:35 +08:00
|
|
|
assert(Addr->getType()->getPointerAddressSpace() ==
|
|
|
|
CGM.getDataLayout().getAllocaAddrSpace() &&
|
|
|
|
"Pointer should be in alloca address space");
|
2017-04-18 04:03:11 +08:00
|
|
|
Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
|
2015-06-13 06:31:32 +08:00
|
|
|
llvm::CallInst *C =
|
|
|
|
Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
|
2015-04-23 05:38:15 +08:00
|
|
|
C->setDoesNotThrow();
|
|
|
|
}
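A hedged sketch of how these two helpers pair up (names are placeholders and
this is not a verbatim call site; EmitAutoVarAlloca below shows the real
start-marker call):
  // EmitLifetimeStart returns null when no marker was emitted, so the
  // matching end marker is conditional on its result.
  llvm::Value *SizeV = EmitLifetimeStart(AllocaSizeInBytes, AllocaAddr);
  // ... code covering the alloca's live range ...
  if (SizeV)
    EmitLifetimeEnd(SizeV, AllocaAddr);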
|
|
|
|
|
2018-02-03 21:55:59 +08:00
|
|
|
void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
|
|
|
|
CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
|
|
|
|
// For each dimension, store its QualType and the corresponding
|
|
|
|
// size-expression Value.
|
|
|
|
SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
|
2018-11-10 03:17:56 +08:00
|
|
|
SmallVector<IdentifierInfo *, 4> VLAExprNames;
|
2018-02-03 21:55:59 +08:00
|
|
|
|
|
|
|
// Break down the array into individual dimensions.
|
|
|
|
QualType Type1D = D.getType();
|
|
|
|
while (getContext().getAsVariableArrayType(Type1D)) {
|
|
|
|
auto VlaSize = getVLAElements1D(Type1D);
|
|
|
|
if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
|
|
|
|
Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
|
|
|
|
else {
|
2018-11-10 03:17:56 +08:00
|
|
|
// Generate a locally unique name for the size expression.
|
|
|
|
Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
|
|
|
|
SmallString<12> Buffer;
|
|
|
|
StringRef NameRef = Name.toStringRef(Buffer);
|
|
|
|
auto &Ident = getContext().Idents.getOwn(NameRef);
|
|
|
|
VLAExprNames.push_back(&Ident);
|
|
|
|
auto SizeExprAddr =
|
|
|
|
CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
|
2018-02-03 21:55:59 +08:00
|
|
|
Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
|
|
|
|
Dimensions.emplace_back(SizeExprAddr.getPointer(),
|
|
|
|
Type1D.getUnqualifiedType());
|
|
|
|
}
|
|
|
|
Type1D = VlaSize.Type;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!EmitDebugInfo)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Register each dimension's size-expression with a DILocalVariable,
|
|
|
|
// so that it can be used by CGDebugInfo when instantiating a DISubrange
|
|
|
|
// to describe this array.
|
2018-11-10 03:17:56 +08:00
|
|
|
unsigned NameIdx = 0;
|
2018-02-03 21:55:59 +08:00
|
|
|
for (auto &VlaSize : Dimensions) {
|
|
|
|
llvm::Metadata *MD;
|
|
|
|
if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
|
|
|
|
MD = llvm::ConstantAsMetadata::get(C);
|
|
|
|
else {
|
|
|
|
// Create an artificial VarDecl to generate debug info for.
|
2018-11-10 03:17:56 +08:00
|
|
|
IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
|
2018-02-03 21:55:59 +08:00
|
|
|
auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
|
|
|
|
auto QT = getContext().getIntTypeForBitwidth(
|
|
|
|
VlaExprTy->getScalarSizeInBits(), false);
|
|
|
|
auto *ArtificialDecl = VarDecl::Create(
|
|
|
|
getContext(), const_cast<DeclContext *>(D.getDeclContext()),
|
2018-11-10 03:17:56 +08:00
|
|
|
D.getLocation(), D.getLocation(), NameIdent, QT,
|
2018-02-03 21:55:59 +08:00
|
|
|
getContext().CreateTypeSourceInfo(QT), SC_Auto);
|
2018-02-13 15:49:34 +08:00
|
|
|
ArtificialDecl->setImplicit();
|
2018-02-03 21:55:59 +08:00
|
|
|
|
|
|
|
MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
|
|
|
|
Builder);
|
|
|
|
}
|
|
|
|
assert(MD && "No Size expression debug node created");
|
|
|
|
DI->registerVLASizeExpression(VlaSize.Type, MD);
|
|
|
|
}
|
|
|
|
}
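As a hedged illustration of what this enables: for a two-dimensional VLA, each
non-constant bound gets a synthetic `__vla_exprN` local (numbered by
`VLAExprCounter` above) that the debug info can point a DISubrange at:
  // Hypothetical user code:
  void f(int n, int m) {
    int grid[n][m]; // bounds described via artificial __vla_expr0, __vla_expr1
  }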
|
|
|
|
|
2011-02-22 14:44:22 +08:00
|
|
|
/// EmitAutoVarAlloca - Emit the alloca and debug information for a
|
2013-12-06 00:25:25 +08:00
|
|
|
/// local variable. Does not emit initialization or destruction.
|
2011-02-22 14:44:22 +08:00
|
|
|
CodeGenFunction::AutoVarEmission
|
|
|
|
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
|
2008-04-07 07:10:54 +08:00
|
|
|
QualType Ty = D.getType();
|
2017-10-13 11:37:48 +08:00
|
|
|
assert(
|
|
|
|
Ty.getAddressSpace() == LangAS::Default ||
|
|
|
|
(Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
|
2011-02-22 14:44:22 +08:00
|
|
|
|
|
|
|
AutoVarEmission emission(D);
|
|
|
|
|
2018-10-02 05:51:28 +08:00
|
|
|
bool isEscapingByRef = D.isEscapingByref();
|
|
|
|
emission.IsEscapingByRef = isEscapingByRef;
|
2011-02-22 14:44:22 +08:00
|
|
|
|
|
|
|
CharUnits alignment = getContext().getDeclAlign(&D);
|
2007-06-02 12:53:11 +08:00
|
|
|
|
2011-06-25 05:55:10 +08:00
|
|
|
// If the type is variably-modified, emit all the VLA sizes for it.
|
|
|
|
if (Ty->isVariablyModifiedType())
|
|
|
|
EmitVariablyModifiedType(Ty);
|
|
|
|
|
2018-02-03 21:55:59 +08:00
|
|
|
auto *DI = getDebugInfo();
|
|
|
|
bool EmitDebugInfo = DI && CGM.getCodeGenOpts().getDebugInfo() >=
|
|
|
|
codegenoptions::LimitedDebugInfo;
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
Address address = Address::invalid();
|
2018-05-17 19:16:35 +08:00
|
|
|
Address AllocaAddr = Address::invalid();
|
2019-04-04 01:57:06 +08:00
|
|
|
Address OpenMPLocalAddr =
|
|
|
|
getLangOpts().OpenMP
|
|
|
|
? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
|
|
|
|
: Address::invalid();
|
2019-06-21 01:15:21 +08:00
|
|
|
bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
|
|
|
|
|
2019-04-04 01:57:06 +08:00
|
|
|
if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
|
|
|
|
address = OpenMPLocalAddr;
|
|
|
|
} else if (Ty->isConstantSizeType()) {
|
2013-06-02 08:09:52 +08:00
|
|
|
// If this value is an array or struct with a statically determinable
|
|
|
|
// constant initializer, there are optimizations we can do.
|
2013-03-27 02:41:47 +08:00
|
|
|
//
|
|
|
|
// TODO: We should constant-evaluate the initializer of any variable,
|
|
|
|
// as long as it is initialized by a constant expression. Currently,
|
|
|
|
// isConstantInitializer produces wrong answers for structs with
|
|
|
|
// reference or bitfield members, and a few other cases, and checking
|
|
|
|
// for POD-ness protects us from some of these.
|
2013-06-02 08:09:52 +08:00
|
|
|
if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
|
|
|
|
(D.isConstexpr() ||
|
|
|
|
((Ty.isPODType(getContext()) ||
|
|
|
|
getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
|
|
|
|
D.getInit()->isConstantInitializer(getContext(), false)))) {
|
2013-03-27 02:41:47 +08:00
|
|
|
|
|
|
|
// If the variable's a const type, and it's neither an NRVO
|
|
|
|
// candidate nor a __block variable and has no mutable members,
|
|
|
|
// emit it as a global instead.
|
2016-11-30 01:01:19 +08:00
|
|
|
// Exception is if a variable is located in non-constant address space
|
|
|
|
// in OpenCL.
|
|
|
|
if ((!getLangOpts().OpenCL ||
|
|
|
|
Ty.getAddressSpace() == LangAS::opencl_constant) &&
|
2018-10-02 05:51:28 +08:00
|
|
|
(CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
|
|
|
|
!isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
|
2013-03-27 02:41:47 +08:00
|
|
|
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
// Signal this condition to later callbacks.
|
|
|
|
emission.Addr = Address::invalid();
|
2013-03-27 02:41:47 +08:00
|
|
|
assert(emission.wasEmittedAsGlobal());
|
|
|
|
return emission;
|
2009-11-04 09:18:09 +08:00
|
|
|
}
|
2011-08-24 06:38:00 +08:00
|
|
|
|
2013-03-27 02:41:47 +08:00
|
|
|
// Otherwise, tell the initialization code that we're in this case.
|
|
|
|
emission.IsConstantAggregate = true;
|
|
|
|
}
|
2011-08-24 06:38:00 +08:00
|
|
|
|
2013-03-27 02:41:47 +08:00
|
|
|
// A normal fixed sized variable becomes an alloca in the entry block,
|
2018-03-14 22:17:45 +08:00
|
|
|
// unless:
|
|
|
|
// - it's an NRVO variable.
|
|
|
|
// - we are compiling OpenMP and it's an OpenMP local variable.
|
2019-04-04 01:57:06 +08:00
|
|
|
if (NRVO) {
|
2013-03-27 02:41:47 +08:00
|
|
|
// The named return value optimization: allocate this variable in the
|
|
|
|
// return slot, so that we can elide the copy when returning this
|
|
|
|
// variable (C++0x [class.copy]p34).
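// Hedged illustration (not from this file): given
//   Widget make() { Widget w; /*...*/ return w; }
// 'w' is an NRVO variable, so 'address' below becomes ReturnValue and the
// copy on return is elided.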
|
2015-09-08 16:05:57 +08:00
|
|
|
address = ReturnValue;
|
2013-03-27 02:41:47 +08:00
|
|
|
|
|
|
|
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
|
2018-03-30 01:56:24 +08:00
|
|
|
const auto *RD = RecordTy->getDecl();
|
|
|
|
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
|
|
|
|
if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
|
|
|
|
RD->isNonTrivialToPrimitiveDestroy()) {
|
2013-03-27 02:41:47 +08:00
|
|
|
// Create a flag that is used to indicate when the NRVO was applied
|
|
|
|
// to this variable. Set it to zero to indicate that NRVO was not
|
|
|
|
// applied.
|
|
|
|
llvm::Value *Zero = Builder.getFalse();
|
2015-09-08 16:05:57 +08:00
|
|
|
Address NRVOFlag =
|
|
|
|
CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
|
2013-03-27 02:41:47 +08:00
|
|
|
EnsureInsertPoint();
|
|
|
|
Builder.CreateStore(Zero, NRVOFlag);
|
|
|
|
|
|
|
|
// Record the NRVO flag for this variable.
|
2015-09-08 16:05:57 +08:00
|
|
|
NRVOFlags[&D] = NRVOFlag.getPointer();
|
|
|
|
emission.NRVOFlag = NRVOFlag.getPointer();
|
2013-03-23 14:43:35 +08:00
|
|
|
}
|
2010-05-15 14:46:45 +08:00
|
|
|
}
|
2008-05-08 13:58:21 +08:00
|
|
|
} else {
|
2015-09-08 16:05:57 +08:00
|
|
|
CharUnits allocaAlignment;
|
|
|
|
llvm::Type *allocaTy;
|
2018-10-02 05:51:28 +08:00
|
|
|
if (isEscapingByRef) {
|
2015-09-08 16:05:57 +08:00
|
|
|
auto &byrefInfo = getBlockByrefInfo(&D);
|
|
|
|
allocaTy = byrefInfo.Type;
|
|
|
|
allocaAlignment = byrefInfo.ByrefAlignment;
|
|
|
|
} else {
|
|
|
|
allocaTy = ConvertTypeForMem(Ty);
|
|
|
|
allocaAlignment = alignment;
|
|
|
|
}
|
2013-03-27 02:41:47 +08:00
|
|
|
|
2015-09-08 17:18:30 +08:00
|
|
|
// Create the alloca. Note that we set the name separately from
|
|
|
|
// building the instruction so that it's there even in no-asserts
|
|
|
|
// builds.
|
2018-05-17 19:16:35 +08:00
|
|
|
address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
|
|
|
|
/*ArraySize=*/nullptr, &AllocaAddr);
|
2013-03-27 02:41:47 +08:00
|
|
|
|
2015-10-08 05:03:41 +08:00
|
|
|
// Don't emit lifetime markers for MSVC catch parameters. The lifetime of
|
|
|
|
// the catch parameter starts in the catchpad instruction, and we can't
|
|
|
|
// insert code in those basic blocks.
|
|
|
|
bool IsMSCatchParam =
|
|
|
|
D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
|
|
|
|
|
2016-10-26 13:42:30 +08:00
|
|
|

      // Emit a lifetime intrinsic if meaningful. There's no point in doing this
      // if we don't have a valid insertion point (?).
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        // PR28267.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin as
        // soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again, this
        // is rare.
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(size, AllocaAddr.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
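    // Illustrative note (example constructed for this document, not from the
    // original source): in C, a label already seen in the current scope can
    // jump back to before this point, e.g.
    //
    //   retry:;
    //     int x;
    //     if (cond()) goto retry;
    //
    // so the lifetime intrinsics for 'x' are omitted rather than emitted
    // incorrectly for the re-entered region.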
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      Address Stack =
          CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");

      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      // FIXME: in general circumstances, this should be an EH cleanup.
      pushStackRestore(NormalCleanup, Stack);
    }

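    // Roughly, the save/restore pair looks like this in IR (sketch for the
    // typed-pointer era, not taken verbatim from a test):
    //
    //   %saved_stack = alloca i8*
    //   %sp = call i8* @llvm.stacksave()
    //   store i8* %sp, i8** %saved_stack
    //   ...
    //   ; in the cleanup:
    //   %reload = load i8*, i8** %saved_stack
    //   call void @llvm.stackrestore(i8* %reload)
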
    auto VlaSize = getVLASize(Ty);
    llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

[DebugInfo] Enable debug information for C99 VLA types
Summary:
This patch enables debugging of C99 VLA types by generating more precise
LLVM debug metadata, using the extended DISubrange 'count' field that
takes a DIVariable.
This should implement:
Bug 30553: Debug info generated for arrays is not what GDB expects (not as good as GCC's)
https://bugs.llvm.org/show_bug.cgi?id=30553
Differential Revision: https://reviews.llvm.org/D41698
llvm-svn: 323952

    // Allocate memory for the array.
    address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
                               &AllocaAddr);

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }
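
  // For example (illustrative only): for 'void f(int m, int n) { int a[m][n]; }'
  // a size expression is registered for each dimension, so a debugger can
  // display 'a' with its runtime bounds m and n.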

  setAddrOfLocalVar(&D, address);
  emission.Addr = address;
  emission.AllocaAddr = AllocaAddr;

  // Emit debug info for local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    Address DebugAddr = address;
    bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
    DI->setLocation(D.getLocation());

    // If NRVO, use a pointer to the return address.
    if (UsePointerValue)
      DebugAddr = ReturnValuePointer;

    (void)DI->EmitDeclareOfAutoVariable(&D, DebugAddr.getPointer(), Builder,
                                        UsePointerValue);
  }
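
  // Illustrative note (example not from the original source): with NRVO in
  // 'S make() { S ret; ...; return ret; }', 'ret' is constructed directly in
  // the return slot, so its debug location is expressed through
  // ReturnValuePointer rather than a dedicated alloca.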

  if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
    EmitVarAnnotations(&D, address.getPointer());

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                         emission.getOriginalAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());

  return emission;
}

static bool isCapturedBy(const VarDecl &, const Expr *);

/// Determines whether the given __block variable is potentially
/// captured by the given statement.
static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
  if (const Expr *E = dyn_cast<Expr>(S))
    return isCapturedBy(Var, E);
  for (const Stmt *SubStmt : S->children())
    if (isCapturedBy(Var, SubStmt))
      return true;
  return false;
}

/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
  // Skip the most common kinds of expressions that make
  // hierarchy-walking expensive.
  E = E->IgnoreParenCasts();

  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
    const BlockDecl *Block = BE->getBlockDecl();
    for (const auto &I : Block->captures()) {
      if (I.getVariable() == &Var)
        return true;
    }

    // No need to walk into the subexpressions.
    return false;
  }

  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
    const CompoundStmt *CS = SE->getSubStmt();
    for (const auto *BI : CS->body())
      if (const auto *BIE = dyn_cast<Expr>(BI)) {
        if (isCapturedBy(Var, BIE))
          return true;
      } else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
        // Special-case declarations: check their initializers.
        for (const auto *I : DS->decls()) {
          if (const auto *VD = dyn_cast<VarDecl>(I)) {
            const Expr *Init = VD->getInit();
            if (Init && isCapturedBy(Var, Init))
              return true;
          }
        }
      } else {
        // FIXME: make the safe assumption that arbitrary statements cause
        // capturing. Later, provide code to poke into statements for capture
        // analysis.
        return true;
      }
    return false;
  }

  for (const Stmt *SubStmt : E->children())
    if (isCapturedBy(Var, SubStmt))
      return true;

  return false;
}
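
// Illustrative example (constructed for this document, not from the source):
// the capture analysis above matters when a __block variable's own
// initializer captures it, e.g.
//
//   __block int x = ^{ return x; }();
//
// Here the block in x's initializer captures x, so the initializer must be
// emitted first and then copied into the byref slot.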

/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
  if (!Init)
    return true;

  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
      if (Constructor->isTrivial() && Constructor->isDefaultConstructor() &&
          !Construct->requiresZeroInitialization())
        return true;

  return false;
}
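
// For instance (illustrative): given 'struct S { int x; };', plain default
// construction of an S is trivial and generates no code, while
// value-initialization such as 'S s = S();' requires zero-initialization and
// is therefore not trivial in this sense.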

void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
                                                      const VarDecl &D,
                                                      Address Loc) {
  auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
  CharUnits Size = getContext().getTypeSizeInChars(type);
  bool isVolatile = type.isVolatileQualified();
  if (!Size.isZero()) {
    switch (trivialAutoVarInit) {
    case LangOptions::TrivialAutoVarInitKind::Uninitialized:
      llvm_unreachable("Uninitialized handled by caller");
    case LangOptions::TrivialAutoVarInitKind::Zero:
      emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
      break;
    case LangOptions::TrivialAutoVarInitKind::Pattern:
      emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
      break;
    }
    return;
  }

  // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
  // them, so emit a memcpy with the VLA size to initialize each element.
  // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
  // will catch that code, but there exists code which generates zero-sized
  // VLAs. Be nice and initialize whatever they requested.
  const auto *VlaType = getContext().getAsVariableArrayType(type);
  if (!VlaType)
    return;
  auto VlaSize = getVLASize(VlaType);
  auto SizeVal = VlaSize.NumElts;
  CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
  switch (trivialAutoVarInit) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    llvm_unreachable("Uninitialized handled by caller");

  case LangOptions::TrivialAutoVarInitKind::Zero:
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
                         isVolatile);
    break;

  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *ElTy = Loc.getElementType();
    llvm::Constant *Constant = constWithPadding(
        CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
    CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
    llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
    llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
    llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
    llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
        SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
        "vla.iszerosized");
    Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
    EmitBlock(SetupBB);
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    llvm::Value *BaseSizeInChars =
        llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
    Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
    llvm::Value *End =
        Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
    llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
    EmitBlock(LoopBB);
    llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
    Cur->addIncoming(Begin.getPointer(), OriginBB);
    CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
    Builder.CreateMemCpy(Address(Cur, CurAlign),
                         createUnnamedGlobalForMemcpyFrom(
                             CGM, D, Builder, Constant, ConstantAlign),
                         BaseSizeInChars, isVolatile);
    llvm::Value *Next =
        Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
    llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
    Builder.CreateCondBr(Done, ContBB, LoopBB);
    Cur->addIncoming(Next, LoopBB);
    EmitBlock(ContBB);
  } break;
  }
}
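
// The Pattern case above first branches around the loop for zero-sized VLAs,
// then walks the elements (vla-setup.loop -> vla-init.loop -> vla-init.cont),
// memcpy'ing the padded pattern constant into one element per iteration.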

void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsEscapingByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have an initializer and it is
  // a C struct that is non-trivial to initialize or an array containing such
  // a struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsEscapingByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer. If so, we'll need to
  // emit the initializer first, then copy into the variable.
Automatic variable initialization
Summary:
Add an option to initialize automatic variables with either a pattern or with
zeroes. The default is still that automatic variables are uninitialized. Also
add attributes to request uninitialized on a per-variable basis, mainly to
disable initialization of large stack arrays when deemed too expensive.
This isn't meant to change the semantics of C and C++. Rather, it's meant to be
a last resort when programmers inadvertently have some undefined behavior in
their code. This patch aims to make undefined behavior hurt less, which
security-minded people will be very happy about. Notably, this means that
there's no inadvertent information leak when:
- The compiler re-uses stack slots, and a value is used uninitialized.
- The compiler re-uses a register, and a value is used uninitialized.
- Stack structs / arrays / unions with padding are copied.
This patch only addresses stack and register information leaks. There are many
more infoleaks that we could address, and much more undefined behavior that
could be tamed. Let's keep this patch focused, and I'm happy to address related
issues elsewhere.
To keep the patch simple, only some `undef` is removed for now, see
`replaceUndef`. The padding-related infoleaks are therefore not all gone yet.
This will be addressed in a follow-up, mainly because addressing padding-related
leaks should be a stand-alone option which is implied by variable
initialization.
There are three options when it comes to automatic variable initialization:
0. Uninitialized
   This is C and C++'s default. It's not changing. Depending on code
   generation, a programmer who runs into undefined behavior by using an
   uninitialized automatic variable may observe any previous value (including
   program secrets), or any value which the compiler saw fit to materialize on
   the stack or in a register (this could be to synthesize an immediate, to
   refer to code or data locations, to generate cookies, etc).
1. Pattern initialization
   This is the recommended initialization approach. Pattern initialization's
   goal is to initialize automatic variables with values which will likely
   transform logic bugs into crashes down the line, are easily recognizable in
   a crash dump, without being values which programmers can rely on for useful
   program semantics. At the same time, pattern initialization tries to
   generate code which will optimize well. You'll find the following details in
   `patternFor`:
   - Integers are initialized with repeated 0xAA bytes (infinite scream).
   - Vectors of integers are also initialized with infinite scream.
   - Pointers are initialized with infinite scream on 64-bit platforms because
     it's an unmappable pointer value on architectures I'm aware of. Pointers
     are initialized to 0x000000AA (small scream) on 32-bit platforms because
     32-bit platforms don't consistently offer unmappable pages. When they do,
     it's usually the zero page. As people try this out, I expect that we'll
     want to allow different platforms to customize this; let's do so later.
   - Vectors of pointers are initialized the same way pointers are.
   - Floating point values and vectors are initialized with a negative quiet
     NaN with repeated 0xFF payload (e.g. 0xffffffff and 0xffffffffffffffff).
     NaNs are nice (here, anyway) because they propagate on arithmetic, making
     it more likely that entire computations become NaN when a single
     uninitialized value sneaks in.
   - Arrays are initialized to their homogeneous elements' initialization
     value, repeated. Stack-based Variable-Length Arrays (VLAs) are
     runtime-initialized to the allocated size (no effort is made for negative
     size, but zero-sized VLAs are untouched even if technically undefined).
   - Structs are initialized to their heterogeneous elements' initialization
     values. Zero-size structs are initialized as 0xAA since they're allocated
     a single byte.
   - Unions are initialized using the initialization for the largest member of
     the union.
   Expect the values used for pattern initialization to change over time, as we
   refine heuristics (both for performance and security). The goal is truly to
   avoid injecting semantics into undefined behavior, and we should be
   comfortable changing these values when there's a worthwhile point in doing
   so.
   Why so much infinite scream? Repeated byte patterns tend to be easy to
   synthesize on most architectures, and otherwise memset is usually very
   efficient. For values which aren't entirely repeated byte patterns, LLVM
   will often generate code which does memset + a few stores.
2. Zero initialization
   Zero-initialize all values. This has the unfortunate side effect of
   providing semantics to otherwise undefined behavior; programs therefore
   might start to rely on this behavior, and that's sad. However, some
   programmers believe that pattern initialization is too expensive for them,
   and data might show that they're right. The only way to make these
   programmers wrong is to offer zero-initialization as an option, figure out
   where they are right, and optimize the compiler into submission. Until the
   compiler provides acceptable performance for all security-minded code, zero
   initialization is a useful (if blunt) tool.
I've been asked for a fourth initialization option: user-provided byte value.
This might be useful, and can easily be added later.
Why is an out-of-band initialization mechanism desired? We could instead use
-Wuninitialized! Indeed we could, but then we're forcing the programmer to
provide semantics for something which doesn't actually have any (it's
uninitialized!). It's then unclear whether `int derp = 0;` lends meaning to `0`,
or whether it's just there to shut that warning up. It's also way easier to use
a compiler flag than it is to manually and intelligently initialize all values
in a program.
Why not just rely on static analysis? Because it cannot reason about all
dynamic code paths effectively, and it has false positives. It's a great tool,
and it could get even better, but it's simply incapable of catching all uses of
uninitialized values.
Why not just rely on MemorySanitizer? Because it's not universally available,
has a 3x performance cost, and shouldn't be deployed in production. Again, it's
a great tool: it'll find the dynamic uses of uninitialized variables that your
test coverage hits, but it won't find the ones that you encounter in
production.
What's the performance like? Not too bad! Previous publications [0] have cited
2.7 to 4.5% averages. We've committed a few patches over the last few months to
address specific regressions, both in code size and performance. In all cases,
the optimizations are generally useful, but variable initialization benefits
from them a lot more than regular code does. We've got a handful of other
optimizations in mind, but the code is in good enough shape and has found
enough latent issues that it's a good time to get the change reviewed, checked
in, and have others kick the tires. We'll continue reducing overheads as we try
this out on diverse codebases.
Is it a good idea? Security-minded folks think so, and apparently so does the
Microsoft Visual Studio team [1], who say "Between 2017 and mid 2018, this
feature would have killed 49 MSRC cases that involved uninitialized struct data
leaking across a trust boundary. It would have also mitigated a number of bugs
involving uninitialized struct data being used directly." They seem to use pure
zero initialization, and claim to have taken the overheads down to within
noise. Don't just trust Microsoft though; here's another relevant person asking
for this [2]. It's been proposed for GCC [3] and LLVM [4] before.
What are the caveats? A few!
- Variables declared in unreachable code, and used later, aren't initialized.
  Think goto, Duff's device, and other objectionable uses of switch. This
  should instead be a hard error in any serious codebase.
- Volatile stack variables are still weird. That's pre-existing; it's really
  the language's fault and this patch keeps it weird. We should deprecate
  volatile [5].
- As noted above, padding isn't fully handled yet.
I don't think these caveats make the patch untenable because they can be
addressed separately.
Should this be on by default? Maybe, in some circumstances. It's a conversation
we can have when we've tried it out sufficiently, and we're confident that
we've eliminated enough of the overheads that most codebases would want to
opt in. Let's keep our precious undefined behavior until that point in time.
How do I use it:
1. On the command line:
   -ftrivial-auto-var-init=uninitialized (the default)
   -ftrivial-auto-var-init=pattern
   -ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
2. Using an attribute:
   int dont_initialize_me __attribute((uninitialized));
[0]: https://users.elis.ugent.be/~jsartor/researchDocs/OOPSLA2011Zero-submit.pdf
[1]: https://twitter.com/JosephBialek/status/1062774315098112001
[2]: https://outflux.net/slides/2018/lss/danger.pdf
[3]: https://gcc.gnu.org/ml/gcc-patches/2014-06/msg00615.html
[4]: https://github.com/AndroidHardeningArchive/platform_external_clang/commit/776a0955ef6686d23a82d2e6a3cbd4a6a882c31c
[5]: http://wg21.link/p1152
I've also posted an RFC to cfe-dev: http://lists.llvm.org/pipermail/cfe-dev/2018-November/060172.html
<rdar://problem/39131435>
Differential Revision: https://reviews.llvm.org/D54604
llvm-svn: 349442
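
A quick illustration of the flags and attribute described above (example
constructed for this document, not from the commit):

  // clang -ftrivial-auto-var-init=pattern example.c
  int f(void) {
    int x;                                // filled with repeated 0xAA bytes
    int y __attribute((uninitialized));   // explicitly opted out
    return x;
  }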
  bool capturedByInit =
      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);

  bool locIsByrefHeader = !capturedByInit;
  const Address Loc =
      locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;

  // Note: constexpr already initializes everything correctly.
  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
      (D.isConstexpr()
           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
           : (D.getAttr<UninitializedAttr>()
                  ? LangOptions::TrivialAutoVarInitKind::Uninitialized
                  : getContext().getLangOpts().getTrivialAutoVarInit()));

  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
    if (trivialAutoVarInit ==
        LangOptions::TrivialAutoVarInitKind::Uninitialized)
      return;

    // Only initialize a __block's storage: we always initialize the header.
    if (emission.IsEscapingByRef && !locIsByrefHeader)
      Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);

    return emitZeroOrPatternForAutoVarInit(type, D, Loc);
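
    // Layout note (illustrative, simplified): a __block variable's alloca
    // holds a byref header (isa, forwarding pointer, flags, size, and
    // optional copy/dispose helpers) followed by the actual storage. Only
    // the storage is pattern- or zero-initialized here; the header is always
    // set up by emitByrefStructureInit.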
Automatic variable initialization
Summary:
Add an option to initialize automatic variables with either a pattern or with
zeroes. The default is still that automatic variables are uninitialized. Also
add attributes to request uninitialized on a per-variable basis, mainly to disable
initialization of large stack arrays when deemed too expensive.
This isn't meant to change the semantics of C and C++. Rather, it's meant to be
a last-resort when programmers inadvertently have some undefined behavior in
their code. This patch aims to make undefined behavior hurt less, which
security-minded people will be very happy about. Notably, this means that
there's no inadvertent information leak when:
- The compiler re-uses stack slots, and a value is used uninitialized.
- The compiler re-uses a register, and a value is used uninitialized.
- Stack structs / arrays / unions with padding are copied.
This patch only addresses stack and register information leaks. There's many
more infoleaks that we could address, and much more undefined behavior that
could be tamed. Let's keep this patch focused, and I'm happy to address related
issues elsewhere.
To keep the patch simple, only some `undef` is removed for now, see
`replaceUndef`. The padding-related infoleaks are therefore not all gone yet.
This will be addressed in a follow-up, mainly because addressing padding-related
leaks should be a stand-alone option which is implied by variable
initialization.
There are three options when it comes to automatic variable initialization:
0. Uninitialized
This is C and C++'s default. It's not changing. Depending on code
generation, a programmer who runs into undefined behavior by using an
uninialized automatic variable may observe any previous value (including
program secrets), or any value which the compiler saw fit to materialize on
the stack or in a register (this could be to synthesize an immediate, to
refer to code or data locations, to generate cookies, etc).
1. Pattern initialization
This is the recommended initialization approach. Pattern initialization's
goal is to initialize automatic variables with values which will likely
transform logic bugs into crashes down the line, are easily recognizable in
a crash dump, without being values which programmers can rely on for useful
program semantics. At the same time, pattern initialization tries to
generate code which will optimize well. You'll find the following details in
`patternFor`:
- Integers are initialized with repeated 0xAA bytes (infinite scream).
- Vectors of integers are also initialized with infinite scream.
- Pointers are initialized with infinite scream on 64-bit platforms because
it's an unmappable pointer value on architectures I'm aware of. Pointers
are initialize to 0x000000AA (small scream) on 32-bit platforms because
32-bit platforms don't consistently offer unmappable pages. When they do
it's usually the zero page. As people try this out, I expect that we'll
want to allow different platforms to customize this, let's do so later.
- Vectors of pointers are initialized the same way pointers are.
- Floating point values and vectors are initialized with a negative quiet
NaN with repeated 0xFF payload (e.g. 0xffffffff and 0xffffffffffffffff).
NaNs are nice (here, anways) because they propagate on arithmetic, making
it more likely that entire computations become NaN when a single
uninitialized value sneaks in.
- Arrays are initialized to their homogeneous elements' initialization
value, repeated. Stack-based Variable-Length Arrays (VLAs) are
runtime-initialized to the allocated size (no effort is made for negative
size, but zero-sized VLAs are untouched even if technically undefined).
- Structs are initialized to their heterogeneous elements' initialization
values. Zero-size structs are initialized as 0xAA since they're allocated
a single byte.
- Unions are initialized using the initialization for the largest member of
the union.
Expect the values used for pattern initialization to change over time, as we
refine heuristics (both for performance and security). The goal is truly to
avoid injecting semantics into undefined behavior, and we should be
comfortable changing these values when there's a worthwhile point in doing
so.
Why so much infinite scream? Repeated byte patterns tend to be easy to
synthesize on most architectures, and otherwise memset is usually very
efficient. For values which aren't entirely repeated byte patterns, LLVM
will often generate code which does memset + a few stores.
2. Zero initialization
Zero-initialize all values. This has the unfortunate side-effect of
providing semantics to otherwise undefined behavior; programs might
therefore start to rely on this behavior, and that's sad. However, some
programmers believe that pattern initialization is too expensive for them,
and data might show that they're right. The only way to make these
programmers wrong is to offer zero-initialization as an option, figure out
where they are right, and optimize the compiler into submission. Until the
compiler provides acceptable performance for all security-minded code, zero
initialization is a useful (if blunt) tool.
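As a rough sketch of the three modes' effects (illustrative only; the exact
pattern values may change as the heuristics evolve):
  void example(void) {
    int i;        // uninitialized: whatever the stack slot / register held
                  // pattern:       0xAAAAAAAA (repeated 0xAA bytes)
                  // zero:          0
    float f;      // uninitialized: garbage bits
                  // pattern:       negative quiet NaN with 0xFF payload
                  // zero:          0.0f
    char buf[16]; // uninitialized: previous stack contents
                  // pattern:       sixteen 0xAA bytes
                  // zero:          sixteen zero bytes
  }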
I've been asked for a fourth initialization option: user-provided byte value.
This might be useful, and can easily be added later.
Why is an out-of-band initialization mechanism desired? We could instead use
-Wuninitialized! Indeed we could, but then we're forcing the programmer to
provide semantics for something which doesn't actually have any (it's
uninitialized!). It's then unclear whether `int derp = 0;` lends meaning to `0`,
or whether it's just there to shut that warning up. It's also way easier to use
a compiler flag than it is to manually and intelligently initialize all values
in a program.
Why not just rely on static analysis? Because it cannot reason about all dynamic
code paths effectively, and it has false positives. It's a great tool, could get
even better, but it's simply incapable of catching all uses of uninitialized
values.
Why not just rely on memory sanitizer? Because it's not universally available,
has a 3x performance cost, and shouldn't be deployed in production. Again, it's
a great tool, it'll find the dynamic uses of uninitialized variables that your
test coverage hits, but it won't find the ones that you encounter in production.
What's the performance like? Not too bad! Previous publications [0] have cited
2.7 to 4.5% averages. We've committed a few patches over the last few months to
address specific regressions, both in code size and performance. In all cases,
the optimizations are generally useful, but variable initialization benefits
from them a lot more than regular code does. We've got a handful of other
optimizations in mind, but the code is in good enough shape and has found enough
latent issues that it's a good time to get the change reviewed, checked in, and
have others kick the tires. We'll continue reducing overheads as we try this out
on diverse codebases.
Is it a good idea? Security-minded folks think so, and apparently so does the
Microsoft Visual Studio team [1] who say "Between 2017 and mid 2018, this
feature would have killed 49 MSRC cases that involved uninitialized struct data
leaking across a trust boundary. It would have also mitigated a number of bugs
involving uninitialized struct data being used directly." They seem to use pure
zero initialization, and claim to have taken the overheads down to within noise.
Don't just trust Microsoft though, here's another relevant person asking for
this [2]. It's been proposed for GCC [3] and LLVM [4] before.
What are the caveats? A few!
- Variables declared in unreachable code, and used later, aren't initialized.
Think goto, Duff's device, and other objectionable uses of switch
(illustrated below). This should instead be a hard error in any serious
codebase.
- Volatile stack variables are still weird. That's pre-existing, it's really
the language's fault and this patch keeps it weird. We should deprecate
volatile [5].
- As noted above, padding isn't fully handled yet.
I don't think these caveats make the patch untenable because they can be
addressed separately.
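To illustrate the first caveat (a hypothetical snippet, not from the patch):
the initialization store is presumably emitted at the point of declaration,
so jumping over the declaration skips it:
  int f(void) {
    goto skip;  // jumps over the declaration below
    int y;      // the auto-init store would be emitted here, but never runs
  skip:
    return y;   // y remains uninitialized, even under -ftrivial-auto-var-init
  }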
Should this be on by default? Maybe, in some circumstances. It's a conversation
we can have when we've tried it out sufficiently, and we're confident that we've
eliminated enough of the overheads that most codebases would want to opt-in.
Let's keep our precious undefined behavior until that point in time.
How do I use it:
1. On the command-line:
-ftrivial-auto-var-init=uninitialized (the default)
-ftrivial-auto-var-init=pattern
-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
2. Using an attribute:
int dont_initialize_me __attribute((uninitialized));
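For example (hypothetical file and invocation), opting a single large buffer
out while pattern-initializing everything else:
  // big_buffer.c
  void consume(char *);
  void fill(void) {
    char big[4096] __attribute((uninitialized)); // deemed too expensive
    char small[16];                              // pattern-initialized
    consume(big);
    consume(small);
  }
compiled with:
  clang -ftrivial-auto-var-init=pattern -c big_buffer.c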
[0]: https://users.elis.ugent.be/~jsartor/researchDocs/OOPSLA2011Zero-submit.pdf
[1]: https://twitter.com/JosephBialek/status/1062774315098112001
[2]: https://outflux.net/slides/2018/lss/danger.pdf
[3]: https://gcc.gnu.org/ml/gcc-patches/2014-06/msg00615.html
[4]: https://github.com/AndroidHardeningArchive/platform_external_clang/commit/776a0955ef6686d23a82d2e6a3cbd4a6a882c31c
[5]: http://wg21.link/p1152
I've also posted an RFC to cfe-dev: http://lists.llvm.org/pipermail/cfe-dev/2018-November/060172.html
<rdar://problem/39131435>
Reviewers: pcc, kcc, rsmith
Subscribers: JDevlieghere, jkorous, dexonsmith, cfe-commits
Differential Revision: https://reviews.llvm.org/D54604
llvm-svn: 349442
2018-12-18 13:12:21 +08:00
|
|
|
};
|
|
|
|
|
2019-07-11 06:53:50 +08:00
|
|
|
if (isTrivialInitializer(Init))
|
|
|
|
return initializeWhatIsTechnicallyUninitialized(Loc);
|
2011-02-22 14:44:22 +08:00
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Constant *constant = nullptr;
|
2019-06-12 01:50:32 +08:00
|
|
|
if (emission.IsConstantAggregate ||
|
|
|
|
D.mightBeUsableInConstantExpressions(getContext())) {
|
2011-12-31 05:15:51 +08:00
|
|
|
assert(!capturedByInit && "constant init contains a capturing block?");
|
2017-08-16 05:42:52 +08:00
|
|
|
constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
|
Variable auto-init: don't initialize aggregate padding of all aggregates
Summary:
C guarantees that brace-init with fewer initializers than members in the
aggregate will initialize the rest of the aggregate as-if it were static
initialization. In turn static initialization guarantees that padding is
initialized to zero bits.
Quoth the Standard:
C17 6.7.9 Initialization ❡21
If there are fewer initializers in a brace-enclosed list than there are elements
or members of an aggregate, or fewer characters in a string literal used to
initialize an array of known size than there are elements in the array, the
remainder of the aggregate shall be initialized implicitly the same as objects
that have static storage duration.
C17 6.7.9 Initialization ❡10
If an object that has automatic storage duration is not initialized explicitly,
its value is indeterminate. If an object that has static or thread storage
duration is not initialized explicitly, then:
* if it has pointer type, it is initialized to a null pointer;
* if it has arithmetic type, it is initialized to (positive or unsigned) zero;
* if it is an aggregate, every member is initialized (recursively) according to
these rules, and any padding is initialized to zero bits;
* if it is a union, the first named member is initialized (recursively)
according to these rules, and any padding is initialized to zero bits;
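A minimal illustration of these two paragraphs (hypothetical example):
  struct S { char c; /* padding bytes on most ABIs */ int i; };
  void g(void) {
    struct S s = { 1 }; // fewer initializers than members: s.i is zeroed
                        // as-if static initialization, and the padding
                        // bytes are zero bits as well
  }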
<rdar://problem/50188861>
Reviewers: glider, pcc, kcc, rjmccall, erik.pilkington
Subscribers: jkorous, dexonsmith, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D61280
llvm-svn: 359628
2019-05-01 06:56:53 +08:00
|
|
|
if (constant && !constant->isZeroValue() &&
|
|
|
|
(trivialAutoVarInit !=
|
|
|
|
LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
|
2019-02-26 18:46:21 +08:00
|
|
|
IsPattern isPattern =
|
|
|
|
(trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
|
|
|
|
? IsPattern::Yes
|
|
|
|
: IsPattern::No;
|
2019-05-01 06:56:53 +08:00
|
|
|
// C guarantees that brace-init with fewer initializers than members in
|
|
|
|
// the aggregate will initialize the rest of the aggregate as-if it were
|
|
|
|
// static initialization. In turn static initialization guarantees that
|
|
|
|
// padding is initialized to zero bits. We could instead pattern-init if D
|
|
|
|
// has any ImplicitValueInitExpr, but that seems to be unintuitive
|
|
|
|
// behavior.
|
|
|
|
constant = constWithPadding(CGM, IsPattern::No,
|
2019-02-26 18:46:21 +08:00
|
|
|
replaceUndef(CGM, isPattern, constant));
|
|
|
|
}
|
2011-12-31 05:15:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!constant) {
|
2019-02-08 09:29:17 +08:00
|
|
|
initializeWhatIsTechnicallyUninitialized(Loc);
|
2015-09-08 16:05:57 +08:00
|
|
|
LValue lv = MakeAddrLValue(Loc, type);
|
2011-06-16 12:16:24 +08:00
|
|
|
lv.setNonGC(true);
|
2015-01-14 15:38:27 +08:00
|
|
|
return EmitExprAsInit(Init, &D, lv, capturedByInit);
|
2011-06-16 12:16:24 +08:00
|
|
|
}
|
2011-03-08 17:11:50 +08:00
|
|
|
|
2013-06-02 08:09:52 +08:00
|
|
|
if (!emission.IsConstantAggregate) {
|
|
|
|
// For simple scalar/complex initialization, store the value directly.
|
2015-09-08 16:05:57 +08:00
|
|
|
LValue lv = MakeAddrLValue(Loc, type);
|
2013-06-02 08:09:52 +08:00
|
|
|
lv.setNonGC(true);
|
|
|
|
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
|
|
|
|
}
|
|
|
|
|
2018-05-15 03:20:12 +08:00
|
|
|
llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
|
2019-02-16 01:26:29 +08:00
|
|
|
emitStoresForConstant(
|
|
|
|
CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
|
2019-06-27 09:34:21 +08:00
|
|
|
type.isVolatileQualified(), Builder, constant);
|
2011-03-08 17:11:50 +08:00
|
|
|
}
|
|
|
|
|
2018-04-06 04:52:58 +08:00
|
|
|
/// Emit an expression as an initializer for an object (variable, field, etc.)
|
|
|
|
/// at the given location. The expression is not necessarily the normal
|
|
|
|
/// initializer for the object, and the address is not necessarily
|
2011-03-08 17:11:50 +08:00
|
|
|
/// its normal location.
|
|
|
|
///
|
|
|
|
/// \param init the initializing expression
|
2018-04-06 04:52:58 +08:00
|
|
|
/// \param D the object to act as if we're initializing
|
2011-03-08 17:11:50 +08:00
|
|
|
/// \param loc the address to initialize; its type is a pointer
|
2018-04-06 04:52:58 +08:00
|
|
|
/// to the LLVM mapping of the object's type
|
2011-03-08 17:11:50 +08:00
|
|
|
/// \param alignment the alignment of the address
|
2018-04-06 04:52:58 +08:00
|
|
|
/// \param capturedByInit true if \p D is a __block variable
|
2011-03-08 17:11:50 +08:00
|
|
|
/// whose address is potentially changed by the initializer
|
2014-12-09 08:32:22 +08:00
|
|
|
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
|
2015-01-14 15:38:27 +08:00
|
|
|
LValue lvalue, bool capturedByInit) {
|
2011-06-16 07:02:42 +08:00
|
|
|
QualType type = D->getType();
|
2011-03-08 17:11:50 +08:00
|
|
|
|
|
|
|
if (type->isReferenceType()) {
|
2013-06-13 07:38:09 +08:00
|
|
|
RValue rvalue = EmitReferenceBindingToExpr(init);
|
2011-08-24 06:38:00 +08:00
|
|
|
if (capturedByInit)
|
2011-06-16 12:16:24 +08:00
|
|
|
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
|
2015-01-14 15:38:27 +08:00
|
|
|
EmitStoreThroughLValue(rvalue, lvalue, true);
|
2013-03-08 05:37:08 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
switch (getEvaluationKind(type)) {
|
|
|
|
case TEK_Scalar:
|
2015-01-14 15:38:27 +08:00
|
|
|
EmitScalarInit(init, D, lvalue, capturedByInit);
|
2013-03-08 05:37:08 +08:00
|
|
|
return;
|
|
|
|
case TEK_Complex: {
|
2011-03-08 17:11:50 +08:00
|
|
|
ComplexPairTy complex = EmitComplexExpr(init);
|
2011-06-16 12:16:24 +08:00
|
|
|
if (capturedByInit)
|
|
|
|
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
|
2015-01-14 15:38:27 +08:00
|
|
|
EmitStoreOfComplex(complex, lvalue, /*init*/ true);
|
2013-03-08 05:37:08 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
case TEK_Aggregate:
|
2013-03-08 05:37:17 +08:00
|
|
|
if (type->isAtomicType()) {
|
|
|
|
EmitAtomicInit(const_cast<Expr*>(init), lvalue);
|
|
|
|
} else {
|
2018-04-06 04:52:58 +08:00
|
|
|
AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
|
|
|
|
if (isa<VarDecl>(D))
|
|
|
|
Overlap = AggValueSlot::DoesNotOverlap;
|
|
|
|
else if (auto *FD = dyn_cast<FieldDecl>(D))
|
2019-06-21 04:56:20 +08:00
|
|
|
Overlap = getOverlapForFieldInit(FD);
|
2013-03-08 05:37:17 +08:00
|
|
|
// TODO: how can we delay here if D is captured by its initializer?
|
2019-12-04 07:17:01 +08:00
|
|
|
EmitAggExpr(init, AggValueSlot::forLValue(
|
|
|
|
lvalue, *this, AggValueSlot::IsDestructed,
|
|
|
|
AggValueSlot::DoesNotNeedGCBarriers,
|
|
|
|
AggValueSlot::IsNotAliased, Overlap));
|
2013-03-08 05:37:17 +08:00
|
|
|
}
|
2013-03-08 05:37:08 +08:00
|
|
|
return;
|
2010-03-13 05:40:43 +08:00
|
|
|
}
|
2013-03-08 05:37:08 +08:00
|
|
|
llvm_unreachable("bad evaluation kind");
|
2011-02-22 14:44:22 +08:00
|
|
|
}
|
|
|
|
|
2011-07-09 09:37:26 +08:00
|
|
|
/// Enter a destroy cleanup for the given local variable.
|
|
|
|
void CodeGenFunction::emitAutoVarTypeCleanup(
|
|
|
|
const CodeGenFunction::AutoVarEmission &emission,
|
|
|
|
QualType::DestructionKind dtorKind) {
|
|
|
|
assert(dtorKind != QualType::DK_none);
|
|
|
|
|
|
|
|
// Note that for __block variables, we want to destroy the
|
|
|
|
// original stack object, not the possibly forwarded object.
|
2015-09-08 16:05:57 +08:00
|
|
|
Address addr = emission.getObjectAddress(*this);
|
2011-07-09 09:37:26 +08:00
|
|
|
|
|
|
|
const VarDecl *var = emission.Variable;
|
|
|
|
QualType type = var->getType();
|
|
|
|
|
|
|
|
CleanupKind cleanupKind = NormalAndEHCleanup;
|
2014-05-21 13:09:00 +08:00
|
|
|
CodeGenFunction::Destroyer *destroyer = nullptr;
|
2011-07-09 09:37:26 +08:00
|
|
|
|
|
|
|
switch (dtorKind) {
|
|
|
|
case QualType::DK_none:
|
|
|
|
llvm_unreachable("no cleanup for trivially-destructible variable");
|
|
|
|
|
|
|
|
case QualType::DK_cxx_destructor:
|
|
|
|
// If there's an NRVO flag on the emission, we need a different
|
|
|
|
// cleanup.
|
|
|
|
if (emission.NRVOFlag) {
|
|
|
|
assert(!type->isArrayType());
|
|
|
|
CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
|
2019-07-22 17:39:13 +08:00
|
|
|
EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
|
2018-03-30 01:56:24 +08:00
|
|
|
emission.NRVOFlag);
|
2011-07-09 09:37:26 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QualType::DK_objc_strong_lifetime:
|
|
|
|
// Suppress cleanups for pseudo-strong variables.
|
|
|
|
if (var->isARCPseudoStrong()) return;
|
2011-08-24 06:38:00 +08:00
|
|
|
|
2011-07-09 09:37:26 +08:00
|
|
|
// Otherwise, consider whether to use an EH cleanup or not.
|
|
|
|
cleanupKind = getARCCleanupKind();
|
|
|
|
|
|
|
|
// Use the imprecise destroyer by default.
|
|
|
|
if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
|
|
|
|
destroyer = CodeGenFunction::destroyARCStrongImprecise;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QualType::DK_objc_weak_lifetime:
|
|
|
|
break;
|
2018-02-28 15:15:55 +08:00
|
|
|
|
|
|
|
case QualType::DK_nontrivial_c_struct:
|
|
|
|
destroyer = CodeGenFunction::destroyNonTrivialCStruct;
|
2018-03-30 01:56:24 +08:00
|
|
|
if (emission.NRVOFlag) {
|
|
|
|
assert(!type->isArrayType());
|
|
|
|
EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
|
|
|
|
emission.NRVOFlag, type);
|
|
|
|
return;
|
|
|
|
}
|
2018-02-28 15:15:55 +08:00
|
|
|
break;
|
2011-07-09 09:37:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we haven't chosen a more specific destroyer, use the default.
|
2012-01-26 11:33:36 +08:00
|
|
|
if (!destroyer) destroyer = getDestroyer(dtorKind);
|
2011-07-11 16:38:19 +08:00
|
|
|
|
2012-09-27 18:16:10 +08:00
|
|
|
// Use an EH cleanup in array destructors iff the destructor itself
|
2011-07-11 16:38:19 +08:00
|
|
|
// is being pushed as an EH cleanup.
|
|
|
|
bool useEHCleanup = (cleanupKind & EHCleanup);
|
|
|
|
EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
|
|
|
|
useEHCleanup);
|
2011-07-09 09:37:26 +08:00
|
|
|
}
|
|
|
|
|
2011-02-22 14:44:22 +08:00
|
|
|
void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
|
2011-02-22 15:16:58 +08:00
|
|
|
assert(emission.Variable && "emission was not valid!");
|
|
|
|
|
2011-02-22 14:44:22 +08:00
|
|
|
// If this was emitted as a global constant, we're done.
|
|
|
|
if (emission.wasEmittedAsGlobal()) return;
|
|
|
|
|
2012-04-14 02:44:05 +08:00
|
|
|
// If we don't have an insertion point, we're done. Sema prevents
|
|
|
|
// us from jumping into any of these scopes anyway.
|
|
|
|
if (!HaveInsertPoint()) return;
|
|
|
|
|
2011-02-22 15:16:58 +08:00
|
|
|
const VarDecl &D = *emission.Variable;
|
2010-07-06 09:34:17 +08:00
|
|
|
|
2011-07-09 09:37:26 +08:00
|
|
|
// Check the type for a cleanup.
|
2019-09-29 13:08:46 +08:00
|
|
|
if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
|
2011-07-09 09:37:26 +08:00
|
|
|
emitAutoVarTypeCleanup(emission, dtorKind);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2011-06-25 07:21:27 +08:00
|
|
|
// In GC mode, honor objc_precise_lifetime.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (getLangOpts().getGC() != LangOptions::NonGC &&
|
2011-06-25 07:21:27 +08:00
|
|
|
D.hasAttr<ObjCPreciseLifetimeAttr>()) {
|
|
|
|
EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
|
|
|
|
}
|
|
|
|
|
2011-02-22 14:44:22 +08:00
|
|
|
// Handle the cleanup attribute.
|
2009-06-30 10:34:44 +08:00
|
|
|
if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
|
2009-02-08 07:51:38 +08:00
|
|
|
const FunctionDecl *FD = CA->getFunctionDecl();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-02-22 14:44:22 +08:00
|
|
|
llvm::Constant *F = CGM.GetAddrOfFunction(FD);
|
2009-02-08 07:51:38 +08:00
|
|
|
assert(F && "Could not find function!");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-02-17 11:33:10 +08:00
|
|
|
const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
|
2011-02-22 14:44:22 +08:00
|
|
|
EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
|
2009-02-08 07:51:38 +08:00
|
|
|
}
|
2009-03-05 09:23:13 +08:00
|
|
|
|
2011-02-22 14:44:22 +08:00
|
|
|
// If this is a block variable, call _Block_object_destroy
|
2018-07-27 00:51:21 +08:00
|
|
|
// (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
|
|
|
|
// mode.
|
2018-10-02 05:51:28 +08:00
|
|
|
if (emission.IsEscapingByRef &&
|
|
|
|
CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
|
2018-07-27 00:51:21 +08:00
|
|
|
BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
|
|
|
|
if (emission.Variable->getType().isObjCGCWeak())
|
|
|
|
Flags |= BLOCK_FIELD_IS_WEAK;
|
|
|
|
enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
|
2018-08-10 23:09:24 +08:00
|
|
|
/*LoadBlockVarAddr*/ false,
|
|
|
|
cxxDestructorCanThrow(emission.Variable->getType()));
|
2018-07-27 00:51:21 +08:00
|
|
|
}
|
2007-06-02 12:16:21 +08:00
|
|
|
}
|
2007-06-14 04:44:40 +08:00
|
|
|
|
2012-01-26 11:33:36 +08:00
|
|
|
CodeGenFunction::Destroyer *
|
2011-07-09 09:37:26 +08:00
|
|
|
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
|
|
|
|
switch (kind) {
|
|
|
|
case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
|
2011-07-09 17:09:00 +08:00
|
|
|
case QualType::DK_cxx_destructor:
|
2012-01-26 11:33:36 +08:00
|
|
|
return destroyCXXObject;
|
2011-07-09 17:09:00 +08:00
|
|
|
case QualType::DK_objc_strong_lifetime:
|
2012-01-26 11:33:36 +08:00
|
|
|
return destroyARCStrongPrecise;
|
2011-07-09 17:09:00 +08:00
|
|
|
case QualType::DK_objc_weak_lifetime:
|
2012-01-26 11:33:36 +08:00
|
|
|
return destroyARCWeak;
|
2018-02-28 15:15:55 +08:00
|
|
|
case QualType::DK_nontrivial_c_struct:
|
|
|
|
return destroyNonTrivialCStruct;
|
2011-07-09 09:37:26 +08:00
|
|
|
}
|
2012-01-27 08:46:27 +08:00
|
|
|
llvm_unreachable("Unknown DestructionKind");
|
2011-07-09 09:37:26 +08:00
|
|
|
}
|
|
|
|
|
2013-02-01 13:11:40 +08:00
|
|
|
/// pushEHDestroy - Push the standard destructor for the given type as
|
|
|
|
/// an EH-only cleanup.
|
|
|
|
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
|
2015-09-08 16:05:57 +08:00
|
|
|
Address addr, QualType type) {
|
2013-02-01 13:11:40 +08:00
|
|
|
assert(dtorKind && "cannot push destructor for trivial type");
|
|
|
|
assert(needsEHCleanup(dtorKind));
|
|
|
|
|
|
|
|
pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// pushDestroy - Push the standard destructor for the given type as
|
|
|
|
/// at least a normal cleanup.
|
2011-07-13 00:41:08 +08:00
|
|
|
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
|
2015-09-08 16:05:57 +08:00
|
|
|
Address addr, QualType type) {
|
2011-07-13 00:41:08 +08:00
|
|
|
assert(dtorKind && "cannot push destructor for trivial type");
|
|
|
|
|
|
|
|
CleanupKind cleanupKind = getCleanupKind(dtorKind);
|
|
|
|
pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
|
|
|
|
cleanupKind & EHCleanup);
|
|
|
|
}
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
|
2012-01-26 11:33:36 +08:00
|
|
|
QualType type, Destroyer *destroyer,
|
2011-07-11 16:38:19 +08:00
|
|
|
bool useEHCleanupForArray) {
|
2011-07-13 00:41:08 +08:00
|
|
|
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
|
|
|
|
destroyer, useEHCleanupForArray);
|
2011-07-09 09:37:26 +08:00
|
|
|
}
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
|
2014-02-01 08:04:45 +08:00
|
|
|
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
|
|
|
|
}
|
|
|
|
|
2013-06-13 04:42:33 +08:00
|
|
|
void CodeGenFunction::pushLifetimeExtendedDestroy(
|
2015-09-08 16:05:57 +08:00
|
|
|
CleanupKind cleanupKind, Address addr, QualType type,
|
2013-06-13 04:42:33 +08:00
|
|
|
Destroyer *destroyer, bool useEHCleanupForArray) {
|
|
|
|
// Push an EH-only cleanup for the object now.
|
|
|
|
// FIXME: When popping normal cleanups, we need to keep this EH cleanup
|
|
|
|
// around in case a temporary's destructor throws an exception.
|
|
|
|
if (cleanupKind & EHCleanup)
|
|
|
|
EHStack.pushCleanup<DestroyObject>(
|
|
|
|
static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
|
|
|
|
destroyer, useEHCleanupForArray);
|
|
|
|
|
|
|
|
// Remember that we need to push a full cleanup for the object at the
|
|
|
|
// end of the full-expression.
|
|
|
|
pushCleanupAfterFullExpr<DestroyObject>(
|
|
|
|
cleanupKind, addr, type, destroyer, useEHCleanupForArray);
|
|
|
|
}
|
|
|
|
|
2011-07-11 16:38:19 +08:00
|
|
|
/// emitDestroy - Immediately perform the destruction of the given
|
|
|
|
/// object.
|
|
|
|
///
|
|
|
|
/// \param addr - the address of the object; a type*
|
|
|
|
/// \param type - the type of the object; if an array type, all
|
|
|
|
/// objects are destroyed in reverse order
|
|
|
|
/// \param destroyer - the function to call to destroy individual
|
|
|
|
/// elements
|
|
|
|
/// \param useEHCleanupForArray - whether an EH cleanup should be
|
|
|
|
/// used when destroying array elements, in case one of the
|
|
|
|
/// destructions throws an exception
|
2015-09-08 16:05:57 +08:00
|
|
|
void CodeGenFunction::emitDestroy(Address addr, QualType type,
|
2012-01-26 11:33:36 +08:00
|
|
|
Destroyer *destroyer,
|
2011-07-11 16:38:19 +08:00
|
|
|
bool useEHCleanupForArray) {
|
2011-07-09 09:37:26 +08:00
|
|
|
const ArrayType *arrayType = getContext().getAsArrayType(type);
|
|
|
|
if (!arrayType)
|
|
|
|
return destroyer(*this, addr, type);
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *length = emitArrayLength(arrayType, type, addr);
|
|
|
|
|
|
|
|
CharUnits elementAlign =
|
|
|
|
addr.getAlignment()
|
|
|
|
.alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
|
2011-07-13 16:09:46 +08:00
|
|
|
|
|
|
|
// Normally we have to check whether the array is zero-length.
|
|
|
|
bool checkZeroLength = true;
|
|
|
|
|
|
|
|
// But if the array length is constant, we can suppress that.
|
|
|
|
if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
|
|
|
|
// ...and if it's constant zero, we can just skip the entire thing.
|
|
|
|
if (constLength->isZero()) return;
|
|
|
|
checkZeroLength = false;
|
|
|
|
}
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *begin = addr.getPointer();
|
2011-07-09 09:37:26 +08:00
|
|
|
llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
emitArrayDestroy(begin, end, type, elementAlign, destroyer,
|
2011-07-13 16:09:46 +08:00
|
|
|
checkZeroLength, useEHCleanupForArray);
|
2011-07-09 09:37:26 +08:00
|
|
|
}

/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first. The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
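///
/// As a rough illustration (a sketch only; exact value names, incoming
/// blocks, and pointer types vary), with the zero-length check enabled the
/// emitted IR has this shape:
///
///       %isempty = icmp eq %T* %begin, %end
///       br i1 %isempty, label %arraydestroy.done, label %arraydestroy.body
///     arraydestroy.body:
///       %elementPast = phi %T* [ %end, %entry ], [ %element, ... ]
///       %element = getelementptr inbounds %T* %elementPast, i64 -1
///       ; ...destroy the element at %element...
///       %done = icmp eq %T* %element, %begin
///       br i1 %done, label %arraydestroy.done, label %arraydestroy.body
///     arraydestroy.done: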
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  llvm::PHINode *elementPast =
    Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
                                                   "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, elementAlign), elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}
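
// Hypothetical usage sketch (not from the original source): a caller that
// already holds begin/end pointers for an array of elements of type elemTy
// might write
//
//   CGF.emitArrayDestroy(begin, end, elemTy, elemAlign,
//                        CGF.getDestroyer(elemTy.isDestructedType()),
//                        /*checkZeroLength*/ true, /*useEHCleanup*/ false);
//
// where getDestroyer is the existing CodeGenFunction hook mapping a
// QualType::DestructionKind to a Destroyer callback.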

/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
    begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
  }
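
  // Worked example (illustrative, not from the original source): for
  // type = T[10][20] with constant bounds, arrayDepth is 2 and gepIndices
  // is {0, 0, 0}, so the GEPs convert a pointer of IR type
  // [10 x [20 x T]]* into a T* addressing the first scalar element while
  // leaving the byte addresses of begin and end unchanged.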

  // Destroy the array. We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}

namespace {
  /// RegularPartialArrayDestroy - a cleanup which performs a partial
  /// array destroy where the end pointer is regularly determined and
  /// does not need to be loaded from a local.
  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    llvm::Value *ArrayEnd;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
                               QualType elementType, CharUnits elementAlign,
                               CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };

  /// IrregularPartialArrayDestroy - a cleanup which performs a
  /// partial array destroy where the end pointer is irregularly
  /// determined and must be loaded from a local.
  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    Address ArrayEndPointer;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
                                 Address arrayEndPointer,
                                 QualType elementType,
                                 CharUnits elementAlign,
                                 CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };
} // end anonymous namespace
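
// Orientation note (an illustrative sketch, not from the original source):
// the "irregular" flavor exists for emission loops where the frontier of
// already-constructed elements advances as code is emitted, so the cleanup
// must reload the end pointer each time it fires. A caller might look
// roughly like this, where endOfInit, ptrAlign, elementType, elementAlign,
// and destroyer are stand-ins for whatever the real caller has in scope:
//
//   Address endOfInit = CGF.CreateTempAlloca(begin->getType(), ptrAlign,
//                                            "array.init.end");
//   CGF.Builder.CreateStore(begin, endOfInit);
//   CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
//                                        elementAlign, destroyer);
//   // ...emit each element's initializer, storing the advanced element
//   // pointer to endOfInit after each one completes...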

/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array.  The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
                                                    arrayBegin, arrayEndPointer,
                                                    elementType, elementAlign,
                                                    destroyer);
}
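
// A sketch of typical usage, with hypothetical names (`endOfInit`, `begin`,
// and `dtorKind` are invented here): a caller emitting an element-by-element
// initialization loop keeps a cursor in a memory slot so the cleanup can
// read it and destroy exactly the elements constructed so far.
//
//   Address endOfInit = CGF.CreateTempAlloca(begin->getType(), ptrAlign,
//                                            "array.init.end");
//   CGF.Builder.CreateStore(begin, endOfInit);
//   CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
//                                        elementAlign,
//                                        CGF.getDestroyer(dtorKind));
//   // ... construct elements, storing the advanced cursor into endOfInit
//   // after each one ...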

/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array.  The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
                                                  arrayBegin, arrayEnd,
                                                  elementType, elementAlign,
                                                  destroyer);
}
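
// By contrast with the irregular form above, here the end pointer is a plain
// SSA value, so this variant suits loops whose cursor is already available,
// for example as a phi. A minimal sketch, with illustrative names:
//
//   llvm::PHINode *cur = Builder.CreatePHI(begin->getType(), 2, "cur");
//   pushRegularPartialArrayCleanup(begin, cur, elementType, elementAlign,
//                                  getDestroyer(dtorKind));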

/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
  if (LifetimeStartFn)
    return LifetimeStartFn;
  LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
    llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
  return LifetimeStartFn;
}

/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
  if (LifetimeEndFn)
    return LifetimeEndFn;
  LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
    llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
  return LifetimeEndFn;
}
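
// For orientation, a sketch of the markers these declarations pair with in
// the emitted IR (sizes and value names are illustrative only):
//
//   %x = alloca i32, align 4
//   %0 = bitcast i32* %x to i8*
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
//   ...
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)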

namespace {
  /// A cleanup to perform a release of an object at the end of a
  /// function.  This is used to balance out the incoming +1 of a
  /// ns_consumed argument when we can't reasonably do that just by
  /// not doing the initial retain for a __block argument.
  struct ConsumeARCParameter final : EHScopeStack::Cleanup {
    ConsumeARCParameter(llvm::Value *param,
                        ARCPreciseLifetime_t precise)
      : Param(param), Precise(precise) {}

    llvm::Value *Param;
    ARCPreciseLifetime_t Precise;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitARCRelease(Param, Precise);
    }
  };
} // end anonymous namespace
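
// A hypothetical Objective-C signature that produces this cleanup: the
// callee receives `obj` at +1 and must release it on every exit path.
//
//   void take(__attribute__((ns_consumed)) id obj);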

/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // This may be passed as an inalloca'ed value on Windows x86.
    if (BlockInfo) {
      llvm::Value *V = Arg.isIndirect()
                           ? Builder.CreateLoad(Arg.getIndirectAddress())
                           : Arg.getDirectValue();
      setBlockContextParameter(IPD, ArgNo, V);
      return;
    }
  }

  Address DeclPtr = Address::invalid();
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    DeclPtr = Arg.getIndirectAddress();
    // If we have a prettier pointer type at this point, bitcast to that.
    unsigned AS = DeclPtr.getType()->getAddressSpace();
    llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
    if (DeclPtr.getType() != IRTy)
      DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
    // Indirect argument is in alloca address space, which may be different
    // from the default address space.
    auto AllocaAS = CGM.getASTAllocaAddressSpace();
    auto *V = DeclPtr.getPointer();
    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
    auto DestLangAS =
        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
    if (SrcLangAS != DestLangAS) {
      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
             CGM.getDataLayout().getAllocaAddrSpace());
      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
      auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
      DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
                            *this, V, SrcLangAS, DestLangAS, T, true),
                        DeclPtr.getAlignment());
    }
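
    // Illustrative target behavior (a sketch, not normative): on amdgcn,
    // where allocas live in addrspace(5), an indirect argument arrives as
    // `i32 addrspace(5)*` and the cast above produces the generic pointer
    // the rest of IRGen expects:
    //
    //   %x.ascast = addrspacecast i32 addrspace(5)* %x to i32*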

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (hasAggregateEvaluationKind(Ty) && !CurFuncIsThunk &&
        Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
      if (QualType::DestructionKind DtorKind =
              D.needsDestruction(getContext())) {
        assert((DtorKind == QualType::DK_cxx_destructor ||
                DtorKind == QualType::DK_nontrivial_c_struct) &&
               "unexpected destructor type");
        pushDestroy(DtorKind, DeclPtr, Ty);
        CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
            EHStack.stable_begin();
      }
    }
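
    // For context, a minimal sketch of a type that takes this path under the
    // Microsoft C++ ABI, where by-value parameters with non-trivial
    // destructors are destroyed in the callee:
    //
    //   struct S { ~S(); };
    //   void f(S s);   // f, not its caller, runs ~S() on `s`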
  } else {
    // Check if the parameter address is controlled by OpenMP runtime.
    Address OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();
    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
      DeclPtr = OpenMPLocalAddr;
    } else {
      // Otherwise, create a temporary to hold the value.
      DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                              D.getName() + ".addr");
    }
    DoStore = true;
  }
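
  // A sketch of the common non-OpenMP path in the emitted IR (names are
  // illustrative): the argument gets a `.addr` temporary, and a later store,
  // guarded by DoStore, fills it in.
  //
  //   define void @f(i32 %x) {
  //     %x.addr = alloca i32, align 4
  //     store i32 %x, i32* %x.addr, align 4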

  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // If a parameter is pseudo-strong then we can omit the implicit retain.
      if (D.isARCPseudoStrong()) {
        assert(lt == Qualifiers::OCL_Strong &&
               "pseudo-strong variable isn't strong?");
        assert(qs.hasConst() && "pseudo-strong variable should be const!");
        lt = Qualifiers::OCL_ExplicitNone;
      }
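
      // A hypothetical declaration that makes its strong parameters
      // pseudo-strong (borrowed rather than retained/released):
      //
      //   __attribute__((objc_externally_retained)) void use(id obj);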

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);
2011-06-16 07:02:42 +08:00
|
|
|
if (lt == Qualifiers::OCL_Strong) {
|
2013-02-21 08:40:10 +08:00
|
|
|
if (!isConsumed) {
|
|
|
|
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
|
|
|
|
// use objc_storeStrong(&dest, value) for retaining the
|
|
|
|
// object. But first, store a null into 'dest' because
|
|
|
|
// objc_storeStrong attempts to release its old value.
|
2013-10-02 10:29:49 +08:00
|
|
|
llvm::Value *Null = CGM.EmitNullConstant(D.getType());
|
2013-02-21 08:40:10 +08:00
|
|
|
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
|
2019-12-04 07:17:01 +08:00
|
|
|
EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
|
2014-02-01 08:04:45 +08:00
|
|
|
DoStore = false;
|
2013-02-21 08:40:10 +08:00
|
|
|
}
|
|
|
|
else
|
2011-06-16 07:02:42 +08:00
|
|
|
// Don't use objc_retainBlock for block pointers, because we
|
|
|
|
// don't want to Block_copy something just because we got it
|
|
|
|
// as a parameter.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
ArgVal = EmitARCRetainNonBlock(ArgVal);
|
2013-02-21 08:40:10 +08:00
|
|
|
}
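
        // At -O0 the branch above yields roughly this IR (a sketch; names
        // and the exact runtime symbol depend on the deployment target):
        //
        //   store i8* null, i8** %obj.addr
        //   call void @llvm.objc.storeStrong(i8** %obj.addr, i8* %obj)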
|
2011-06-16 07:02:42 +08:00
|
|
|
} else {
|
|
|
|
// Push the cleanup for a consumed parameter.
|
2013-03-13 11:10:54 +08:00
|
|
|
if (isConsumed) {
|
|
|
|
ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
|
|
|
|
? ARCPreciseLifetime : ARCImpreciseLifetime);
|
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }
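
        // A __weak local must be registered with the Objective-C runtime;
        // objc_initWeak both registers the slot and stores the initial value,
        // which is why the separate store below becomes unnecessary.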
        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  setAddrOfLocalVar(&D, DeclPtr);

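  // (Thunks are compiler-generated forwarding functions, so their parameters
  // don't correspond to user-written declarations.)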
  // Emit debug info for param declarations in non-thunk functions.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().getDebugInfo() >=
            codegenoptions::LimitedDebugInfo &&
        !CurFuncIsThunk) {
      DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
    }
  }

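  // An annotated parameter, e.g.
  //   void f(int x __attribute__((annotate("my_tag"))));
  // is lowered to an llvm.var.annotation call on its local storage.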
  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.getPointer());

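  // For example, under -fsanitize=nullability-return, checking the return of
  //   _Nonnull id g(_Nonnull id p);
  // is only meaningful on paths where 'p' itself was non-null.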
  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}
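
// Emits the combiner (and optional initializer) for a user-defined reduction,
// e.g. #pragma omp declare reduction(myadd : int : omp_out += omp_in)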
void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}

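// Emits the mapping routine for a user-defined mapper, e.g.
//   #pragma omp declare mapper(struct vec v) map(v.len, v.data[0:v.len])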
void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
                                         CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
      (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
}

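// e.g. #pragma omp requires unified_shared_memory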
void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().checkArchForUnifiedAddressing(D);
}