//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
2011-01-19 09:36:36 +08:00
|
|
|
#include "CGDebugInfo.h"
|
2008-04-09 23:51:31 +08:00
|
|
|
#include "CGObjCRuntime.h"
|
2007-08-22 01:43:55 +08:00
|
|
|
#include "CodeGenFunction.h"
|
|
|
|
#include "CodeGenModule.h"
|
2011-06-16 07:02:42 +08:00
|
|
|
#include "TargetInfo.h"
|
2008-08-29 16:11:39 +08:00
|
|
|
#include "clang/AST/ASTContext.h"
|
2008-08-11 13:35:13 +08:00
|
|
|
#include "clang/AST/DeclObjC.h"
|
2009-04-26 09:32:48 +08:00
|
|
|
#include "clang/AST/StmtObjC.h"
|
2008-09-03 08:27:26 +08:00
|
|
|
#include "clang/Basic/Diagnostic.h"
|
2013-10-31 05:53:58 +08:00
|
|
|
#include "clang/CodeGen/CGFunctionInfo.h"
|
2008-08-31 03:51:14 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2014-03-04 19:02:08 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
2013-01-02 19:45:17 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/InlineAsm.h"
|
2007-08-22 01:43:55 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
|
|
|
|
static TryEmitResult
|
|
|
|
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
|
Substitute type arguments into uses of Objective-C interface members.
When messaging a method that was defined in an Objective-C class (or
category or extension thereof) that has type parameters, substitute
the type arguments for those type parameters. Similarly, substitute
into property accesses, instance variables, and other references.
This includes general infrastructure for substituting the type
arguments associated with an ObjCObject(Pointer)Type into a type
referenced within a particular context, handling all of the
substitutions required to deal with (e.g.) inheritance involving
parameterized classes. In cases where no type arguments are available
(e.g., because we're messaging via some unspecialized type, id, etc.),
we substitute in the type bounds for the type parameters instead.
Example:
@interface NSSet<T : id<NSCopying>> : NSObject <NSCopying>
- (T)firstObject;
@end
void f(NSSet<NSString *> *stringSet, NSSet *anySet) {
[stringSet firstObject]; // produces NSString*
[anySet firstObject]; // produces id<NSCopying> (the bound)
}
When substituting for the type parameters given an unspecialized
context (i.e., no specific type arguments were given), substituting
the type bounds unconditionally produces type signatures that are too
strong compared to the pre-generics signatures. Instead, use the
following rule:
- In covariant positions, such as method return types, replace type
parameters with “id” or “Class” (the latter only when the type
parameter bound is “Class” or qualified class, e.g,
“Class<NSCopying>”)
- In other positions (e.g., parameter types), replace type
parameters with their type bounds.
- When a specialized Objective-C object or object pointer type
contains a type parameter in its type arguments (e.g.,
NSArray<T>*, but not NSArray<NSString *> *), replace the entire
object/object pointer type with its unspecialized version (e.g.,
NSArray *).
llvm-svn: 241543
2015-07-07 11:57:53 +08:00
|
|
|
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
|
|
|
|
QualType ET,
|
|
|
|
RValue Result);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
/// Given the address of a variable of pointer type, find the correct
|
|
|
|
/// null to store into it.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
static llvm::Constant *getNullForVariable(Address addr) {
|
|
|
|
llvm::Type *type = addr.getElementType();
|
2011-06-16 07:02:42 +08:00
|
|
|
return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
|
|
|
|
}
|
|
|
|
|
2008-06-25 01:04:18 +08:00
|
|
|
/// Emits an instance of NSConstantString representing the object.
|
2009-09-09 23:08:12 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
|
2008-11-26 05:53:21 +08:00
|
|
|
{
|
2018-07-31 03:24:48 +08:00
|
|
|
llvm::Constant *C =
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
|
2008-08-20 08:28:19 +08:00
|
|
|
// FIXME: This bitcast should just be made an invariant on the Runtime.
|
2009-07-30 02:54:39 +08:00
|
|
|
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
|
2008-06-25 01:04:18 +08:00
|
|
|
}
|
|
|
|
|
2012-04-19 08:25:12 +08:00
|
|
|
/// EmitObjCBoxedExpr - This routine generates code to call
|
|
|
|
/// the appropriate expression boxing method. This will either be
|
2015-06-26 13:28:36 +08:00
|
|
|
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
|
|
|
|
/// or [NSValue valueWithBytes:objCType:].
|
2012-03-07 04:05:56 +08:00
|
|
|
///
|
2012-03-30 01:31:31 +08:00
|
|
|
llvm::Value *
|
2012-04-19 08:25:12 +08:00
|
|
|
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
|
2012-03-07 04:05:56 +08:00
|
|
|
// Generate the correct selector for this literal's concrete type.
|
|
|
|
// Get the method.
|
2012-04-19 08:25:12 +08:00
|
|
|
const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
|
2015-06-26 13:28:36 +08:00
|
|
|
const Expr *SubExpr = E->getSubExpr();
|
2012-04-19 08:25:12 +08:00
|
|
|
assert(BoxingMethod && "BoxingMethod is null");
|
|
|
|
assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
|
|
|
|
Selector Sel = BoxingMethod->getSelector();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2012-03-07 04:05:56 +08:00
|
|
|
// Generate a reference to the class pointer, which will be the receiver.
|
2012-04-19 08:25:12 +08:00
|
|
|
// Assumes that the method was introduced in the class that should be
|
|
|
|
// messaged (avoids pulling it out of the result type).
|
2012-03-07 04:05:56 +08:00
|
|
|
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
|
2012-04-19 08:25:12 +08:00
|
|
|
const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
|
2013-03-01 03:01:20 +08:00
|
|
|
llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);
|
2014-12-19 01:13:56 +08:00
|
|
|
|
2012-03-07 04:05:56 +08:00
|
|
|
CallArgList Args;
|
2015-06-26 13:28:36 +08:00
|
|
|
const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
|
|
|
|
QualType ArgQT = ArgDecl->getType().getUnqualifiedType();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
|
|
// ObjCBoxedExpr supports boxing of structs and unions
|
2015-06-26 13:28:36 +08:00
|
|
|
// via [NSValue valueWithBytes:objCType:]
|
|
|
|
const QualType ValueType(SubExpr->getType().getCanonicalType());
|
|
|
|
if (ValueType->isObjCBoxableRecordType()) {
|
|
|
|
// Emit CodeGen for first parameter
|
|
|
|
// and cast value to correct type
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Temporary = CreateMemTemp(SubExpr->getType());
|
2015-06-26 13:28:36 +08:00
|
|
|
EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
|
|
|
|
Args.add(RValue::get(BitCast.getPointer()), ArgQT);
|
2015-06-26 13:28:36 +08:00
|
|
|
|
|
|
|
// Create char array to store type encoding
|
|
|
|
std::string Str;
|
|
|
|
getContext().getObjCEncodingForType(ValueType, Str);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2015-06-26 13:28:36 +08:00
|
|
|
// Cast type encoding to correct type
|
|
|
|
const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
|
|
|
|
QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
|
|
|
|
llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));
|
|
|
|
|
|
|
|
Args.add(RValue::get(Cast), EncodingQT);
|
|
|
|
} else {
|
|
|
|
Args.add(EmitAnyExpr(SubExpr), ArgQT);
|
|
|
|
}
|
2014-01-26 00:55:45 +08:00
|
|
|
|
|
|
|
RValue result = Runtime.GenerateMessageSend(
|
|
|
|
*this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
|
|
|
|
Args, ClassDecl, BoxingMethod);
|
2018-07-31 03:24:48 +08:00
|
|
|
return Builder.CreateBitCast(result.getScalarVal(),
|
2012-03-07 04:05:56 +08:00
|
|
|
ConvertType(E->getType()));
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
|
2014-10-29 02:28:16 +08:00
|
|
|
const ObjCMethodDecl *MethodWithObjects) {
|
2012-03-07 04:05:56 +08:00
|
|
|
ASTContext &Context = CGM.getContext();
|
2014-05-21 13:09:00 +08:00
|
|
|
const ObjCDictionaryLiteral *DLE = nullptr;
|
2012-03-07 04:05:56 +08:00
|
|
|
const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
|
|
|
|
if (!ALE)
|
|
|
|
DLE = cast<ObjCDictionaryLiteral>(E);
|
2017-04-15 14:42:00 +08:00
|
|
|
|
|
|
|
// Optimize empty collections by referencing constants, when available.
|
2018-07-31 03:24:48 +08:00
|
|
|
uint64_t NumElements =
|
2012-03-07 04:05:56 +08:00
|
|
|
ALE ? ALE->getNumElements() : DLE->getNumElements();
|
2017-04-15 14:42:00 +08:00
|
|
|
if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
|
|
|
|
StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
|
|
|
|
QualType IdTy(CGM.getContext().getObjCIdType());
|
|
|
|
llvm::Constant *Constant =
|
|
|
|
CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
|
2017-04-17 23:21:55 +08:00
|
|
|
LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
|
2018-08-10 05:08:08 +08:00
|
|
|
llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
|
2017-04-17 23:21:55 +08:00
|
|
|
cast<llvm::LoadInst>(Ptr)->setMetadata(
|
|
|
|
CGM.getModule().getMDKindID("invariant.load"),
|
|
|
|
llvm::MDNode::get(getLLVMContext(), None));
|
|
|
|
return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
|
2017-04-15 14:42:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the type of the array we're initializing.
|
2012-03-07 04:05:56 +08:00
|
|
|
llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
|
|
|
|
NumElements);
|
|
|
|
QualType ElementType = Context.getObjCIdType().withConst();
|
2018-07-31 03:24:48 +08:00
|
|
|
QualType ElementArrayType
|
|
|
|
= Context.getConstantArrayType(ElementType, APNumElements,
|
2012-03-07 04:05:56 +08:00
|
|
|
ArrayType::Normal, /*IndexTypeQuals=*/0);
|
|
|
|
|
|
|
|
// Allocate the temporary array(s).
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address Objects = CreateMemTemp(ElementArrayType, "objects");
|
|
|
|
Address Keys = Address::invalid();
|
2012-03-07 04:05:56 +08:00
|
|
|
if (DLE)
|
|
|
|
Keys = CreateMemTemp(ElementArrayType, "keys");
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2013-04-04 08:20:38 +08:00
|
|
|
// In ARC, we may need to do extra work to keep all the keys and
|
|
|
|
// values alive until after the call.
|
|
|
|
SmallVector<llvm::Value *, 16> NeededObjects;
|
|
|
|
bool TrackNeededObjects =
|
|
|
|
(getLangOpts().ObjCAutoRefCount &&
|
|
|
|
CGM.getCodeGenOpts().OptimizationLevel != 0);
|
|
|
|
|
2012-03-07 04:05:56 +08:00
|
|
|
// Perform the actual initialialization of the array(s).
|
|
|
|
for (uint64_t i = 0; i < NumElements; i++) {
|
|
|
|
if (ALE) {
|
2013-04-04 08:20:38 +08:00
|
|
|
// Emit the element and store it to the appropriate array slot.
|
2012-03-07 04:05:56 +08:00
|
|
|
const Expr *Rhs = ALE->getElement(i);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
LValue LV = MakeAddrLValue(
|
|
|
|
Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
|
2017-10-10 17:39:32 +08:00
|
|
|
ElementType, AlignmentSource::Decl);
|
2013-04-04 08:20:38 +08:00
|
|
|
|
|
|
|
llvm::Value *value = EmitScalarExpr(Rhs);
|
|
|
|
EmitStoreThroughLValue(RValue::get(value), LV, true);
|
|
|
|
if (TrackNeededObjects) {
|
|
|
|
NeededObjects.push_back(value);
|
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
} else {
|
2013-04-04 08:20:38 +08:00
|
|
|
// Emit the key and store it to the appropriate array slot.
|
2012-03-07 04:05:56 +08:00
|
|
|
const Expr *Key = DLE->getKeyValueElement(i).Key;
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
LValue KeyLV = MakeAddrLValue(
|
|
|
|
Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
|
2017-10-10 17:39:32 +08:00
|
|
|
ElementType, AlignmentSource::Decl);
|
2013-04-04 08:20:38 +08:00
|
|
|
llvm::Value *keyValue = EmitScalarExpr(Key);
|
|
|
|
EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
|
2012-03-07 04:05:56 +08:00
|
|
|
|
2013-04-04 08:20:38 +08:00
|
|
|
// Emit the value and store it to the appropriate array slot.
|
2015-04-06 06:45:47 +08:00
|
|
|
const Expr *Value = DLE->getKeyValueElement(i).Value;
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
LValue ValueLV = MakeAddrLValue(
|
|
|
|
Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
|
2017-10-10 17:39:32 +08:00
|
|
|
ElementType, AlignmentSource::Decl);
|
2013-04-04 08:20:38 +08:00
|
|
|
llvm::Value *valueValue = EmitScalarExpr(Value);
|
|
|
|
EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
|
|
|
|
if (TrackNeededObjects) {
|
|
|
|
NeededObjects.push_back(keyValue);
|
|
|
|
NeededObjects.push_back(valueValue);
|
|
|
|
}
|
2012-03-07 04:05:56 +08:00
|
|
|
}
|
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2012-03-07 04:05:56 +08:00
|
|
|
// Generate the argument list.
|
2018-07-31 03:24:48 +08:00
|
|
|
CallArgList Args;
|
2012-03-07 04:05:56 +08:00
|
|
|
ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
|
|
|
|
const ParmVarDecl *argDecl = *PI++;
|
|
|
|
QualType ArgQT = argDecl->getType().getUnqualifiedType();
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Args.add(RValue::get(Objects.getPointer()), ArgQT);
|
2012-03-07 04:05:56 +08:00
|
|
|
if (DLE) {
|
|
|
|
argDecl = *PI++;
|
|
|
|
ArgQT = argDecl->getType().getUnqualifiedType();
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Args.add(RValue::get(Keys.getPointer()), ArgQT);
|
2012-03-07 04:05:56 +08:00
|
|
|
}
|
|
|
|
argDecl = *PI;
|
|
|
|
ArgQT = argDecl->getType().getUnqualifiedType();
|
2018-07-31 03:24:48 +08:00
|
|
|
llvm::Value *Count =
|
2012-03-07 04:05:56 +08:00
|
|
|
llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
|
|
|
|
Args.add(RValue::get(Count), ArgQT);
|
|
|
|
|
|
|
|
// Generate a reference to the class pointer, which will be the receiver.
|
|
|
|
Selector Sel = MethodWithObjects->getSelector();
|
|
|
|
QualType ResultType = E->getType();
|
|
|
|
const ObjCObjectPointerType *InterfacePointerType
|
|
|
|
= ResultType->getAsObjCInterfacePointerType();
|
2018-07-31 03:24:48 +08:00
|
|
|
ObjCInterfaceDecl *Class
|
2012-03-07 04:05:56 +08:00
|
|
|
= InterfacePointerType->getObjectType()->getInterface();
|
|
|
|
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
|
2013-03-01 03:01:20 +08:00
|
|
|
llvm::Value *Receiver = Runtime.GetClass(*this, Class);
|
2012-03-07 04:05:56 +08:00
|
|
|
|
|
|
|
// Generate the message send.
|
2014-01-26 00:55:45 +08:00
|
|
|
RValue result = Runtime.GenerateMessageSend(
|
|
|
|
*this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
|
|
|
|
Receiver, Args, Class, MethodWithObjects);
|
2013-04-04 08:20:38 +08:00
|
|
|
|
|
|
|
// The above message send needs these objects, but in ARC they are
|
|
|
|
// passed in a buffer that is essentially __unsafe_unretained.
|
|
|
|
// Therefore we must prevent the optimizer from releasing them until
|
|
|
|
// after the call.
|
|
|
|
if (TrackNeededObjects) {
|
|
|
|
EmitARCIntrinsicUse(NeededObjects);
|
|
|
|
}
|
|
|
|
|
2018-07-31 03:24:48 +08:00
|
|
|
return Builder.CreateBitCast(result.getScalarVal(),
|
2012-03-07 04:05:56 +08:00
|
|
|
ConvertType(E->getType()));
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Emit an Objective-C array literal (e.g. @[ a, b, c ]).
///
/// Lowers the literal to the collection-literal path using the
/// +arrayWithObjects:count: method attached to the AST node.
llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  ObjCMethodDecl *ArrayMethod = E->getArrayWithObjectsMethod();
  return EmitObjCCollectionLiteral(E, ArrayMethod);
}
|
|
|
|
|
|
|
|
/// Emit an Objective-C dictionary literal (e.g. @{ k : v }).
///
/// Lowers the literal to the collection-literal path using the
/// +dictionaryWithObjects:forKeys:count: method attached to the AST node.
llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
                                            const ObjCDictionaryLiteral *E) {
  ObjCMethodDecl *DictMethod = E->getDictWithObjectsMethod();
  return EmitObjCCollectionLiteral(E, DictMethod);
}
|
|
|
|
|
2008-06-25 01:04:18 +08:00
|
|
|
/// Emit a selector.
|
|
|
|
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
|
|
|
|
// Untyped selector.
|
|
|
|
// Note that this implementation allows for non-constant strings to be passed
|
|
|
|
// as arguments to @selector(). Currently, the only thing preventing this
|
|
|
|
// behaviour is the type checking in the front end.
|
2013-03-01 03:01:20 +08:00
|
|
|
return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
|
2008-06-25 01:04:18 +08:00
|
|
|
}
|
|
|
|
|
2008-08-20 08:28:19 +08:00
|
|
|
/// Emit a protocol expression (@protocol(...)).
///
/// Delegates to the runtime to materialize the protocol reference.
/// FIXME: This should pass the Decl not the name.
llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  return Runtime.GenerateProtocolRef(*this, E->getProtocol());
}
|
2008-06-25 01:04:18 +08:00
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Adjust the type of an Objective-C object that doesn't match up due
|
Substitute type arguments into uses of Objective-C interface members.
When messaging a method that was defined in an Objective-C class (or
category or extension thereof) that has type parameters, substitute
the type arguments for those type parameters. Similarly, substitute
into property accesses, instance variables, and other references.
This includes general infrastructure for substituting the type
arguments associated with an ObjCObject(Pointer)Type into a type
referenced within a particular context, handling all of the
substitutions required to deal with (e.g.) inheritance involving
parameterized classes. In cases where no type arguments are available
(e.g., because we're messaging via some unspecialized type, id, etc.),
we substitute in the type bounds for the type parameters instead.
Example:
@interface NSSet<T : id<NSCopying>> : NSObject <NSCopying>
- (T)firstObject;
@end
void f(NSSet<NSString *> *stringSet, NSSet *anySet) {
[stringSet firstObject]; // produces NSString*
[anySet firstObject]; // produces id<NSCopying> (the bound)
}
When substituting for the type parameters given an unspecialized
context (i.e., no specific type arguments were given), substituting
the type bounds unconditionally produces type signatures that are too
strong compared to the pre-generics signatures. Instead, use the
following rule:
- In covariant positions, such as method return types, replace type
parameters with “id” or “Class” (the latter only when the type
parameter bound is “Class” or qualified class, e.g,
“Class<NSCopying>”)
- In other positions (e.g., parameter types), replace type
parameters with their type bounds.
- When a specialized Objective-C object or object pointer type
contains a type parameter in its type arguments (e.g.,
NSArray<T>*, but not NSArray<NSString *> *), replace the entire
object/object pointer type with its unspecialized version (e.g.,
NSArray *).
llvm-svn: 241543
2015-07-07 11:57:53 +08:00
|
|
|
/// to type erasure at various points, e.g., related result types or the use
|
|
|
|
/// of parameterized classes.
|
|
|
|
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
|
|
|
|
RValue Result) {
|
|
|
|
if (!ExpT->isObjCRetainableType())
|
2011-06-11 09:09:30 +08:00
|
|
|
return Result;
|
2011-06-16 07:02:42 +08:00
|
|
|
|
Substitute type arguments into uses of Objective-C interface members.
When messaging a method that was defined in an Objective-C class (or
category or extension thereof) that has type parameters, substitute
the type arguments for those type parameters. Similarly, substitute
into property accesses, instance variables, and other references.
This includes general infrastructure for substituting the type
arguments associated with an ObjCObject(Pointer)Type into a type
referenced within a particular context, handling all of the
substitutions required to deal with (e.g.) inheritance involving
parameterized classes. In cases where no type arguments are available
(e.g., because we're messaging via some unspecialized type, id, etc.),
we substitute in the type bounds for the type parameters instead.
Example:
@interface NSSet<T : id<NSCopying>> : NSObject <NSCopying>
- (T)firstObject;
@end
void f(NSSet<NSString *> *stringSet, NSSet *anySet) {
[stringSet firstObject]; // produces NSString*
[anySet firstObject]; // produces id<NSCopying> (the bound)
}
When substituting for the type parameters given an unspecialized
context (i.e., no specific type arguments were given), substituting
the type bounds unconditionally produces type signatures that are too
strong compared to the pre-generics signatures. Instead, use the
following rule:
- In covariant positions, such as method return types, replace type
parameters with “id” or “Class” (the latter only when the type
parameter bound is “Class” or qualified class, e.g,
“Class<NSCopying>”)
- In other positions (e.g., parameter types), replace type
parameters with their type bounds.
- When a specialized Objective-C object or object pointer type
contains a type parameter in its type arguments (e.g.,
NSArray<T>*, but not NSArray<NSString *> *), replace the entire
object/object pointer type with its unspecialized version (e.g.,
NSArray *).
llvm-svn: 241543
2015-07-07 11:57:53 +08:00
|
|
|
// If the converted types are the same, we're done.
|
|
|
|
llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
|
|
|
|
if (ExpLLVMTy == Result.getScalarVal()->getType())
|
2011-06-11 09:09:30 +08:00
|
|
|
return Result;
|
Substitute type arguments into uses of Objective-C interface members.
When messaging a method that was defined in an Objective-C class (or
category or extension thereof) that has type parameters, substitute
the type arguments for those type parameters. Similarly, substitute
into property accesses, instance variables, and other references.
This includes general infrastructure for substituting the type
arguments associated with an ObjCObject(Pointer)Type into a type
referenced within a particular context, handling all of the
substitutions required to deal with (e.g.) inheritance involving
parameterized classes. In cases where no type arguments are available
(e.g., because we're messaging via some unspecialized type, id, etc.),
we substitute in the type bounds for the type parameters instead.
Example:
@interface NSSet<T : id<NSCopying>> : NSObject <NSCopying>
- (T)firstObject;
@end
void f(NSSet<NSString *> *stringSet, NSSet *anySet) {
[stringSet firstObject]; // produces NSString*
[anySet firstObject]; // produces id<NSCopying> (the bound)
}
When substituting for the type parameters given an unspecialized
context (i.e., no specific type arguments were given), substituting
the type bounds unconditionally produces type signatures that are too
strong compared to the pre-generics signatures. Instead, use the
following rule:
- In covariant positions, such as method return types, replace type
parameters with “id” or “Class” (the latter only when the type
parameter bound is “Class” or qualified class, e.g,
“Class<NSCopying>”)
- In other positions (e.g., parameter types), replace type
parameters with their type bounds.
- When a specialized Objective-C object or object pointer type
contains a type parameter in its type arguments (e.g.,
NSArray<T>*, but not NSArray<NSString *> *), replace the entire
object/object pointer type with its unspecialized version (e.g.,
NSArray *).
llvm-svn: 241543
2015-07-07 11:57:53 +08:00
|
|
|
|
|
|
|
// We have applied a substitution. Cast the rvalue appropriately.
|
2011-06-11 09:09:30 +08:00
|
|
|
return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
|
Substitute type arguments into uses of Objective-C interface members.
When messaging a method that was defined in an Objective-C class (or
category or extension thereof) that has type parameters, substitute
the type arguments for those type parameters. Similarly, substitute
into property accesses, instance variables, and other references.
This includes general infrastructure for substituting the type
arguments associated with an ObjCObject(Pointer)Type into a type
referenced within a particular context, handling all of the
substitutions required to deal with (e.g.) inheritance involving
parameterized classes. In cases where no type arguments are available
(e.g., because we're messaging via some unspecialized type, id, etc.),
we substitute in the type bounds for the type parameters instead.
Example:
@interface NSSet<T : id<NSCopying>> : NSObject <NSCopying>
- (T)firstObject;
@end
void f(NSSet<NSString *> *stringSet, NSSet *anySet) {
[stringSet firstObject]; // produces NSString*
[anySet firstObject]; // produces id<NSCopying> (the bound)
}
When substituting for the type parameters given an unspecialized
context (i.e., no specific type arguments were given), substituting
the type bounds unconditionally produces type signatures that are too
strong compared to the pre-generics signatures. Instead, use the
following rule:
- In covariant positions, such as method return types, replace type
parameters with “id” or “Class” (the latter only when the type
parameter bound is “Class” or qualified class, e.g,
“Class<NSCopying>”)
- In other positions (e.g., parameter types), replace type
parameters with their type bounds.
- When a specialized Objective-C object or object pointer type
contains a type parameter in its type arguments (e.g.,
NSArray<T>*, but not NSArray<NSString *> *), replace the entire
object/object pointer type with its unspecialized version (e.g.,
NSArray *).
llvm-svn: 241543
2015-07-07 11:57:53 +08:00
|
|
|
ExpLLVMTy));
|
2011-06-11 09:09:30 +08:00
|
|
|
}
|
2008-06-25 01:04:18 +08:00
|
|
|
|
2011-07-22 16:53:00 +08:00
|
|
|
/// Decide whether to extend the lifetime of the receiver of a
|
|
|
|
/// returns-inner-pointer message.
|
|
|
|
static bool
|
|
|
|
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
|
|
|
|
switch (message->getReceiverKind()) {
|
|
|
|
|
|
|
|
// For a normal instance message, we should extend unless the
|
|
|
|
// receiver is loaded from a variable with precise lifetime.
|
|
|
|
case ObjCMessageExpr::Instance: {
|
|
|
|
const Expr *receiver = message->getInstanceReceiver();
|
2015-09-10 07:37:17 +08:00
|
|
|
|
|
|
|
// Look through OVEs.
|
|
|
|
if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
|
|
|
|
if (opaque->getSourceExpr())
|
|
|
|
receiver = opaque->getSourceExpr()->IgnoreParens();
|
|
|
|
}
|
|
|
|
|
2011-07-22 16:53:00 +08:00
|
|
|
const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
|
|
|
|
if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
|
|
|
|
receiver = ice->getSubExpr()->IgnoreParens();
|
|
|
|
|
2015-09-10 07:37:17 +08:00
|
|
|
// Look through OVEs.
|
|
|
|
if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
|
|
|
|
if (opaque->getSourceExpr())
|
|
|
|
receiver = opaque->getSourceExpr()->IgnoreParens();
|
|
|
|
}
|
|
|
|
|
2011-07-22 16:53:00 +08:00
|
|
|
// Only __strong variables.
|
|
|
|
if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// All ivars and fields have precise lifetime.
|
|
|
|
if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Otherwise, check for variables.
|
|
|
|
const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
|
|
|
|
if (!declRef) return true;
|
|
|
|
const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
|
|
|
|
if (!var) return true;
|
|
|
|
|
|
|
|
// All variables have precise lifetime except local variables with
|
|
|
|
// automatic storage duration that aren't specially marked.
|
|
|
|
return (var->hasLocalStorage() &&
|
|
|
|
!var->hasAttr<ObjCPreciseLifetimeAttr>());
|
|
|
|
}
|
|
|
|
|
|
|
|
case ObjCMessageExpr::Class:
|
|
|
|
case ObjCMessageExpr::SuperClass:
|
|
|
|
// It's never necessary for class objects.
|
|
|
|
return false;
|
|
|
|
|
|
|
|
case ObjCMessageExpr::SuperInstance:
|
|
|
|
// We generally assume that 'self' lives throughout a method call.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm_unreachable("invalid receiver kind");
|
|
|
|
}
|
|
|
|
|
Define weak and __weak to mean ARC-style weak references, even in MRC.
Previously, __weak was silently accepted and ignored in MRC mode.
That makes this a potentially source-breaking change that we have to
roll out cautiously. Accordingly, for the time being, actual support
for __weak references in MRC is experimental, and the compiler will
reject attempts to actually form such references. The intent is to
eventually enable the feature by default in all non-GC modes.
(It is, of course, incompatible with ObjC GC's interpretation of
__weak.)
If you like, you can enable this feature with
-Xclang -fobjc-weak
but like any -Xclang option, this option may be removed at any point,
e.g. if/when it is eventually enabled by default.
This patch also enables the use of the ARC __unsafe_unretained qualifier
in MRC. Unlike __weak, this is being enabled immediately. Since
variables are essentially __unsafe_unretained by default in MRC,
the only practical uses are (1) communication and (2) changing the
default behavior of by-value block capture.
As an implementation matter, this means that the ObjC ownership
qualifiers may appear in any ObjC language mode, and so this patch
removes a number of checks for getLangOpts().ObjCAutoRefCount
that were guarding the processing of these qualifiers. I don't
expect this to be a significant drain on performance; it may even
be faster to just check for these qualifiers directly on a type
(since it's probably in a register anyway) than to do N dependent
loads to grab the LangOptions.
rdar://9674298
llvm-svn: 251041
2015-10-23 02:38:17 +08:00
|
|
|
/// Given an expression of ObjC pointer type, check whether it was
|
|
|
|
/// immediately loaded from an ARC __weak l-value.
|
|
|
|
static const Expr *findWeakLValue(const Expr *E) {
|
|
|
|
assert(E->getType()->isObjCRetainableType());
|
|
|
|
E = E->IgnoreParens();
|
|
|
|
if (auto CE = dyn_cast<CastExpr>(E)) {
|
|
|
|
if (CE->getCastKind() == CK_LValueToRValue) {
|
|
|
|
if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
|
|
|
|
return CE->getSubExpr();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
2010-05-22 09:48:05 +08:00
|
|
|
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
|
|
|
|
ReturnValueSlot Return) {
|
2008-06-25 01:04:18 +08:00
|
|
|
// Only the lookup mechanism and first two arguments of the method
|
|
|
|
// implementation vary between runtimes. We can get the receiver and
|
|
|
|
// arguments in generic code.
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
bool isDelegateInit = E->isDelegateInitCall();
|
|
|
|
|
2011-07-22 16:53:00 +08:00
|
|
|
const ObjCMethodDecl *method = E->getMethodDecl();
|
2012-01-30 04:27:13 +08:00
|
|
|
|
Define weak and __weak to mean ARC-style weak references, even in MRC.
Previously, __weak was silently accepted and ignored in MRC mode.
That makes this a potentially source-breaking change that we have to
roll out cautiously. Accordingly, for the time being, actual support
for __weak references in MRC is experimental, and the compiler will
reject attempts to actually form such references. The intent is to
eventually enable the feature by default in all non-GC modes.
(It is, of course, incompatible with ObjC GC's interpretation of
__weak.)
If you like, you can enable this feature with
-Xclang -fobjc-weak
but like any -Xclang option, this option may be removed at any point,
e.g. if/when it is eventually enabled by default.
This patch also enables the use of the ARC __unsafe_unretained qualifier
in MRC. Unlike __weak, this is being enabled immediately. Since
variables are essentially __unsafe_unretained by default in MRC,
the only practical uses are (1) communication and (2) changing the
default behavior of by-value block capture.
As an implementation matter, this means that the ObjC ownership
qualifiers may appear in any ObjC language mode, and so this patch
removes a number of checks for getLangOpts().ObjCAutoRefCount
that were guarding the processing of these qualifiers. I don't
expect this to be a significant drain on performance; it may even
be faster to just check for these qualifiers directly on a type
(since it's probably in a register anyway) than to do N dependent
loads to grab the LangOptions.
rdar://9674298
llvm-svn: 251041
2015-10-23 02:38:17 +08:00
|
|
|
// If the method is -retain, and the receiver's being loaded from
|
|
|
|
// a __weak variable, peephole the entire operation to objc_loadWeakRetained.
|
|
|
|
if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
|
|
|
|
method->getMethodFamily() == OMF_retain) {
|
|
|
|
if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
|
|
|
|
LValue lvalue = EmitLValue(lvalueExpr);
|
|
|
|
llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
|
|
|
|
return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
// We don't retain the receiver in delegate init calls, and this is
|
|
|
|
// safe because the receiver value is always loaded from 'self',
|
|
|
|
// which we zero out. We don't want to Block_copy block receivers,
|
|
|
|
// though.
|
|
|
|
bool retainSelf =
|
|
|
|
(!isDelegateInit &&
|
2012-03-11 15:00:24 +08:00
|
|
|
CGM.getLangOpts().ObjCAutoRefCount &&
|
2011-07-22 16:53:00 +08:00
|
|
|
method &&
|
|
|
|
method->hasAttr<NSConsumesSelfAttr>());
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2008-08-12 02:12:00 +08:00
|
|
|
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
|
2008-06-25 01:04:18 +08:00
|
|
|
bool isSuperMessage = false;
|
2008-08-25 16:19:24 +08:00
|
|
|
bool isClassMessage = false;
|
2014-05-21 13:09:00 +08:00
|
|
|
ObjCInterfaceDecl *OID = nullptr;
|
2008-06-25 01:04:18 +08:00
|
|
|
// Find the receiver
|
2011-06-11 09:09:30 +08:00
|
|
|
QualType ReceiverType;
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Value *Receiver = nullptr;
|
Overhaul the AST representation of Objective-C message send
expressions, to improve source-location information, clarify the
actual receiver of the message, and pave the way for proper C++
support. The ObjCMessageExpr node represents four different kinds of
message sends in a single AST node:
1) Send to an object instance described by an expression (e.g., [x method:5])
2) Send to a class described by the class name (e.g., [NSString method:5])
3) Send to a superclass class (e.g., [super method:5] in class method)
4) Send to a superclass instance (e.g., [super method:5] in instance method)
Previously these four cases were tangled together. Now, they have
more distinct representations. Specific changes:
1) Unchanged; the object instance is represented by an Expr*.
2) Previously stored the ObjCInterfaceDecl* referring to the class
receiving the message. Now stores a TypeSourceInfo* so that we know
how the class was spelled. This both maintains typedef information
and opens the door for more complicated C++ types (e.g., dependent
types). There was an alternative, unused representation of these
sends by naming the class via an IdentifierInfo *. In practice, we
either had an ObjCInterfaceDecl *, from which we would get the
IdentifierInfo *, or we fell into the case below...
3) Previously represented by a class message whose IdentifierInfo *
referred to "super". Sema and CodeGen would use isStr("super") to
determine if they had a send to super. Now represented as a
"class super" send, where we have both the location of the "super"
keyword and the ObjCInterfaceDecl* of the superclass we're
targeting (statically).
4) Previously represented by an instance message whose receiver is
an ObjCSuperExpr, which Sema and CodeGen would check for via
isa<ObjCSuperExpr>(). Now represented as an "instance super" send,
where we have both the location of the "super" keyword and the
ObjCInterfaceDecl* of the superclass we're targeting
(statically). Note that ObjCSuperExpr only has one remaining use in
the AST, which is for "super.prop" references.
The new representation of ObjCMessageExpr is 2 pointers smaller than
the old one, since it combines more storage. It also eliminates a leak
when we loaded message-send expressions from a precompiled header. The
representation also feels much cleaner to me; comments welcome!
This patch attempts to maintain the same semantics we previously had
with Objective-C message sends. In several places, there are massive
changes that boil down to simply replacing a nested-if structure such
as:
if (message has a receiver expression) {
// instance message
if (isa<ObjCSuperExpr>(...)) {
// send to super
} else {
// send to an object
}
} else {
// class message
if (name->isStr("super")) {
// class send to super
} else {
// send to class
}
}
with a switch
switch (E->getReceiverKind()) {
case ObjCMessageExpr::SuperInstance: ...
case ObjCMessageExpr::Instance: ...
case ObjCMessageExpr::SuperClass: ...
case ObjCMessageExpr::Class:...
}
There are quite a few places (particularly in the checkers) where
send-to-super is effectively ignored. I've placed FIXMEs in most of
them, and attempted to address send-to-super in a reasonable way. This
could use some review.
llvm-svn: 101972
2010-04-21 08:45:42 +08:00
|
|
|
switch (E->getReceiverKind()) {
|
|
|
|
case ObjCMessageExpr::Instance:
|
2011-06-11 09:09:30 +08:00
|
|
|
ReceiverType = E->getInstanceReceiver()->getType();
|
2011-06-16 07:02:42 +08:00
|
|
|
if (retainSelf) {
|
|
|
|
TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
|
|
|
|
E->getInstanceReceiver());
|
|
|
|
Receiver = ter.getPointer();
|
2011-07-22 16:53:00 +08:00
|
|
|
if (ter.getInt()) retainSelf = false;
|
2011-06-16 07:02:42 +08:00
|
|
|
} else
|
|
|
|
Receiver = EmitScalarExpr(E->getInstanceReceiver());
|
Overhaul the AST representation of Objective-C message send
expressions, to improve source-location information, clarify the
actual receiver of the message, and pave the way for proper C++
support. The ObjCMessageExpr node represents four different kinds of
message sends in a single AST node:
1) Send to an object instance described by an expression (e.g., [x method:5])
2) Send to a class described by the class name (e.g., [NSString method:5])
3) Send to a superclass class (e.g., [super method:5] in class method)
4) Send to a superclass instance (e.g., [super method:5] in instance method)
Previously these four cases were tangled together. Now, they have
more distinct representations. Specific changes:
1) Unchanged; the object instance is represented by an Expr*.
2) Previously stored the ObjCInterfaceDecl* referring to the class
receiving the message. Now stores a TypeSourceInfo* so that we know
how the class was spelled. This both maintains typedef information
and opens the door for more complicated C++ types (e.g., dependent
types). There was an alternative, unused representation of these
sends by naming the class via an IdentifierInfo *. In practice, we
either had an ObjCInterfaceDecl *, from which we would get the
IdentifierInfo *, or we fell into the case below...
3) Previously represented by a class message whose IdentifierInfo *
referred to "super". Sema and CodeGen would use isStr("super") to
determine if they had a send to super. Now represented as a
"class super" send, where we have both the location of the "super"
keyword and the ObjCInterfaceDecl* of the superclass we're
targeting (statically).
4) Previously represented by an instance message whose receiver is
an ObjCSuperExpr, which Sema and CodeGen would check for via
isa<ObjCSuperExpr>(). Now represented as an "instance super" send,
where we have both the location of the "super" keyword and the
ObjCInterfaceDecl* of the superclass we're targeting
(statically). Note that ObjCSuperExpr only has one remaining use in
the AST, which is for "super.prop" references.
The new representation of ObjCMessageExpr is 2 pointers smaller than
the old one, since it combines more storage. It also eliminates a leak
when we loaded message-send expressions from a precompiled header. The
representation also feels much cleaner to me; comments welcome!
This patch attempts to maintain the same semantics we previously had
with Objective-C message sends. In several places, there are massive
changes that boil down to simply replacing a nested-if structure such
as:
if (message has a receiver expression) {
// instance message
if (isa<ObjCSuperExpr>(...)) {
// send to super
} else {
// send to an object
}
} else {
// class message
if (name->isStr("super")) {
// class send to super
} else {
// send to class
}
}
with a switch
switch (E->getReceiverKind()) {
case ObjCMessageExpr::SuperInstance: ...
case ObjCMessageExpr::Instance: ...
case ObjCMessageExpr::SuperClass: ...
case ObjCMessageExpr::Class:...
}
There are quite a few places (particularly in the checkers) where
send-to-super is effectively ignored. I've placed FIXMEs in most of
them, and attempted to address send-to-super in a reasonable way. This
could use some review.
llvm-svn: 101972
2010-04-21 08:45:42 +08:00
|
|
|
break;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
Overhaul the AST representation of Objective-C message send
expressions, to improve source-location information, clarify the
actual receiver of the message, and pave the way for proper C++
support. The ObjCMessageExpr node represents four different kinds of
message sends in a single AST node:
1) Send to an object instance described by an expression (e.g., [x method:5])
2) Send to a class described by the class name (e.g., [NSString method:5])
3) Send to a superclass class (e.g., [super method:5] in class method)
4) Send to a superclass instance (e.g., [super method:5] in instance method)
Previously these four cases were tangled together. Now, they have
more distinct representations. Specific changes:
1) Unchanged; the object instance is represented by an Expr*.
2) Previously stored the ObjCInterfaceDecl* referring to the class
receiving the message. Now stores a TypeSourceInfo* so that we know
how the class was spelled. This both maintains typedef information
and opens the door for more complicated C++ types (e.g., dependent
types). There was an alternative, unused representation of these
sends by naming the class via an IdentifierInfo *. In practice, we
either had an ObjCInterfaceDecl *, from which we would get the
IdentifierInfo *, or we fell into the case below...
3) Previously represented by a class message whose IdentifierInfo *
referred to "super". Sema and CodeGen would use isStr("super") to
determine if they had a send to super. Now represented as a
"class super" send, where we have both the location of the "super"
keyword and the ObjCInterfaceDecl* of the superclass we're
targeting (statically).
4) Previously represented by an instance message whose receiver is
an ObjCSuperExpr, which Sema and CodeGen would check for via
isa<ObjCSuperExpr>(). Now represented as an "instance super" send,
where we have both the location of the "super" keyword and the
ObjCInterfaceDecl* of the superclass we're targeting
(statically). Note that ObjCSuperExpr only has one remaining use in
the AST, which is for "super.prop" references.
The new representation of ObjCMessageExpr is 2 pointers smaller than
the old one, since it combines more storage. It also eliminates a leak
when we loaded message-send expressions from a precompiled header. The
representation also feels much cleaner to me; comments welcome!
This patch attempts to maintain the same semantics we previously had
with Objective-C message sends. In several places, there are massive
changes that boil down to simply replacing a nested-if structure such
as:
if (message has a receiver expression) {
// instance message
if (isa<ObjCSuperExpr>(...)) {
// send to super
} else {
// send to an object
}
} else {
// class message
if (name->isStr("super")) {
// class send to super
} else {
// send to class
}
}
with a switch
switch (E->getReceiverKind()) {
case ObjCMessageExpr::SuperInstance: ...
case ObjCMessageExpr::Instance: ...
case ObjCMessageExpr::SuperClass: ...
case ObjCMessageExpr::Class:...
}
There are quite a few places (particularly in the checkers) where
send-to-super is effectively ignored. I've placed FIXMEs in most of
them, and attempted to address send-to-super in a reasonable way. This
could use some review.
llvm-svn: 101972
2010-04-21 08:45:42 +08:00
|
|
|
case ObjCMessageExpr::Class: {
|
2011-06-11 09:09:30 +08:00
|
|
|
ReceiverType = E->getClassReceiver();
|
|
|
|
const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
|
2010-05-18 04:12:43 +08:00
|
|
|
assert(ObjTy && "Invalid Objective-C class message send");
|
|
|
|
OID = ObjTy->getInterface();
|
|
|
|
assert(OID && "Invalid Objective-C class message send");
|
2013-03-01 03:01:20 +08:00
|
|
|
Receiver = Runtime.GetClass(*this, OID);
|
2008-08-25 16:19:24 +08:00
|
|
|
isClassMessage = true;
|
Overhaul the AST representation of Objective-C message send
expressions, to improve source-location information, clarify the
actual receiver of the message, and pave the way for proper C++
support. The ObjCMessageExpr node represents four different kinds of
message sends in a single AST node:
1) Send to an object instance described by an expression (e.g., [x method:5])
2) Send to a class described by the class name (e.g., [NSString method:5])
3) Send to a superclass class (e.g., [super method:5] in class method)
4) Send to a superclass instance (e.g., [super method:5] in instance method)
Previously these four cases were tangled together. Now, they have
more distinct representations. Specific changes:
1) Unchanged; the object instance is represented by an Expr*.
2) Previously stored the ObjCInterfaceDecl* referring to the class
receiving the message. Now stores a TypeSourceInfo* so that we know
how the class was spelled. This both maintains typedef information
and opens the door for more complicated C++ types (e.g., dependent
types). There was an alternative, unused representation of these
sends by naming the class via an IdentifierInfo *. In practice, we
either had an ObjCInterfaceDecl *, from which we would get the
IdentifierInfo *, or we fell into the case below...
3) Previously represented by a class message whose IdentifierInfo *
referred to "super". Sema and CodeGen would use isStr("super") to
determine if they had a send to super. Now represented as a
"class super" send, where we have both the location of the "super"
keyword and the ObjCInterfaceDecl* of the superclass we're
targeting (statically).
4) Previously represented by an instance message whose receiver is
an ObjCSuperExpr, which Sema and CodeGen would check for via
isa<ObjCSuperExpr>(). Now represented as an "instance super" send,
where we have both the location of the "super" keyword and the
ObjCInterfaceDecl* of the superclass we're targeting
(statically). Note that ObjCSuperExpr only has one remaining use in
the AST, which is for "super.prop" references.
The new representation of ObjCMessageExpr is 2 pointers smaller than
the old one, since it combines more storage. It also eliminates a leak
when we loaded message-send expressions from a precompiled header. The
representation also feels much cleaner to me; comments welcome!
This patch attempts to maintain the same semantics we previously had
with Objective-C message sends. In several places, there are massive
changes that boil down to simply replacing a nested-if structure such
as:
if (message has a receiver expression) {
// instance message
if (isa<ObjCSuperExpr>(...)) {
// send to super
} else {
// send to an object
}
} else {
// class message
if (name->isStr("super")) {
// class send to super
} else {
// send to class
}
}
with a switch
switch (E->getReceiverKind()) {
case ObjCMessageExpr::SuperInstance: ...
case ObjCMessageExpr::Instance: ...
case ObjCMessageExpr::SuperClass: ...
case ObjCMessageExpr::Class:...
}
There are quite a few places (particularly in the checkers) where
send-to-super is effectively ignored. I've placed FIXMEs in most of
them, and attempted to address send-to-super in a reasonable way. This
could use some review.
llvm-svn: 101972
2010-04-21 08:45:42 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ObjCMessageExpr::SuperInstance:
|
2011-06-11 09:09:30 +08:00
|
|
|
ReceiverType = E->getSuperType();
|
Overhaul the AST representation of Objective-C message send
expressions, to improve source-location information, clarify the
actual receiver of the message, and pave the way for proper C++
support. The ObjCMessageExpr node represents four different kinds of
message sends in a single AST node:
1) Send to an object instance described by an expression (e.g., [x method:5])
2) Send to a class described by the class name (e.g., [NSString method:5])
3) Send to a superclass class (e.g., [super method:5] in class method)
4) Send to a superclass instance (e.g., [super method:5] in instance method)
Previously these four cases were tangled together. Now, they have
more distinct representations. Specific changes:
1) Unchanged; the object instance is represented by an Expr*.
2) Previously stored the ObjCInterfaceDecl* referring to the class
receiving the message. Now stores a TypeSourceInfo* so that we know
how the class was spelled. This both maintains typedef information
and opens the door for more complicated C++ types (e.g., dependent
types). There was an alternative, unused representation of these
sends by naming the class via an IdentifierInfo *. In practice, we
either had an ObjCInterfaceDecl *, from which we would get the
IdentifierInfo *, or we fell into the case below...
3) Previously represented by a class message whose IdentifierInfo *
referred to "super". Sema and CodeGen would use isStr("super") to
determine if they had a send to super. Now represented as a
"class super" send, where we have both the location of the "super"
keyword and the ObjCInterfaceDecl* of the superclass we're
targeting (statically).
4) Previously represented by an instance message whose receiver is
an ObjCSuperExpr, which Sema and CodeGen would check for via
isa<ObjCSuperExpr>(). Now represented as an "instance super" send,
where we have both the location of the "super" keyword and the
ObjCInterfaceDecl* of the superclass we're targeting
(statically). Note that ObjCSuperExpr only has one remaining use in
the AST, which is for "super.prop" references.
The new representation of ObjCMessageExpr is 2 pointers smaller than
the old one, since it combines more storage. It also eliminates a leak
when we loaded message-send expressions from a precompiled header. The
representation also feels much cleaner to me; comments welcome!
This patch attempts to maintain the same semantics we previously had
with Objective-C message sends. In several places, there are massive
changes that boil down to simply replacing a nested-if structure such
as:
if (message has a receiver expression) {
// instance message
if (isa<ObjCSuperExpr>(...)) {
// send to super
} else {
// send to an object
}
} else {
// class message
if (name->isStr("super")) {
// class send to super
} else {
// send to class
}
}
with a switch
switch (E->getReceiverKind()) {
case ObjCMessageExpr::SuperInstance: ...
case ObjCMessageExpr::Instance: ...
case ObjCMessageExpr::SuperClass: ...
case ObjCMessageExpr::Class:...
}
There are quite a few places (particularly in the checkers) where
send-to-super is effectively ignored. I've placed FIXMEs in most of
them, and attempted to address send-to-super in a reasonable way. This
could use some review.
llvm-svn: 101972
2010-04-21 08:45:42 +08:00
|
|
|
Receiver = LoadObjCSelf();
|
2008-06-25 01:04:18 +08:00
|
|
|
isSuperMessage = true;
|
Overhaul the AST representation of Objective-C message send
expressions, to improve source-location information, clarify the
actual receiver of the message, and pave the way for proper C++
support. The ObjCMessageExpr node represents four different kinds of
message sends in a single AST node:
1) Send to an object instance described by an expression (e.g., [x method:5])
2) Send to a class described by the class name (e.g., [NSString method:5])
3) Send to a superclass class (e.g., [super method:5] in class method)
4) Send to a superclass instance (e.g., [super method:5] in instance method)
Previously these four cases were tangled together. Now, they have
more distinct representations. Specific changes:
1) Unchanged; the object instance is represented by an Expr*.
2) Previously stored the ObjCInterfaceDecl* referring to the class
receiving the message. Now stores a TypeSourceInfo* so that we know
how the class was spelled. This both maintains typedef information
and opens the door for more complicated C++ types (e.g., dependent
types). There was an alternative, unused representation of these
sends by naming the class via an IdentifierInfo *. In practice, we
either had an ObjCInterfaceDecl *, from which we would get the
IdentifierInfo *, or we fell into the case below...
3) Previously represented by a class message whose IdentifierInfo *
referred to "super". Sema and CodeGen would use isStr("super") to
determine if they had a send to super. Now represented as a
"class super" send, where we have both the location of the "super"
keyword and the ObjCInterfaceDecl* of the superclass we're
targeting (statically).
4) Previously represented by an instance message whose receiver is
an ObjCSuperExpr, which Sema and CodeGen would check for via
isa<ObjCSuperExpr>(). Now represented as an "instance super" send,
where we have both the location of the "super" keyword and the
ObjCInterfaceDecl* of the superclass we're targeting
(statically). Note that ObjCSuperExpr only has one remaining use in
the AST, which is for "super.prop" references.
The new representation of ObjCMessageExpr is 2 pointers smaller than
the old one, since it combines more storage. It also eliminates a leak
when we loaded message-send expressions from a precompiled header. The
representation also feels much cleaner to me; comments welcome!
This patch attempts to maintain the same semantics we previously had
with Objective-C message sends. In several places, there are massive
changes that boil down to simply replacing a nested-if structure such
as:
if (message has a receiver expression) {
// instance message
if (isa<ObjCSuperExpr>(...)) {
// send to super
} else {
// send to an object
}
} else {
// class message
if (name->isStr("super")) {
// class send to super
} else {
// send to class
}
}
with a switch
switch (E->getReceiverKind()) {
case ObjCMessageExpr::SuperInstance: ...
case ObjCMessageExpr::Instance: ...
case ObjCMessageExpr::SuperClass: ...
case ObjCMessageExpr::Class:...
}
There are quite a few places (particularly in the checkers) where
send-to-super is effectively ignored. I've placed FIXMEs in most of
them, and attempted to address send-to-super in a reasonable way. This
could use some review.
llvm-svn: 101972
2010-04-21 08:45:42 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case ObjCMessageExpr::SuperClass:
|
2011-06-11 09:09:30 +08:00
|
|
|
ReceiverType = E->getSuperType();
|
2008-06-25 01:04:18 +08:00
|
|
|
Receiver = LoadObjCSelf();
|
Overhaul the AST representation of Objective-C message send
expressions, to improve source-location information, clarify the
actual receiver of the message, and pave the way for proper C++
support. The ObjCMessageExpr node represents four different kinds of
message sends in a single AST node:
1) Send to an object instance described by an expression (e.g., [x method:5])
2) Send to a class described by the class name (e.g., [NSString method:5])
3) Send to a superclass class (e.g., [super method:5] in class method)
4) Send to a superclass instance (e.g., [super method:5] in instance method)
Previously these four cases were tangled together. Now, they have
more distinct representations. Specific changes:
1) Unchanged; the object instance is represented by an Expr*.
2) Previously stored the ObjCInterfaceDecl* referring to the class
receiving the message. Now stores a TypeSourceInfo* so that we know
how the class was spelled. This both maintains typedef information
and opens the door for more complicated C++ types (e.g., dependent
types). There was an alternative, unused representation of these
sends by naming the class via an IdentifierInfo *. In practice, we
either had an ObjCInterfaceDecl *, from which we would get the
IdentifierInfo *, or we fell into the case below...
3) Previously represented by a class message whose IdentifierInfo *
referred to "super". Sema and CodeGen would use isStr("super") to
determine if they had a send to super. Now represented as a
"class super" send, where we have both the location of the "super"
keyword and the ObjCInterfaceDecl* of the superclass we're
targeting (statically).
4) Previously represented by an instance message whose receiver is
an ObjCSuperExpr, which Sema and CodeGen would check for via
isa<ObjCSuperExpr>(). Now represented as an "instance super" send,
where we have both the location of the "super" keyword and the
ObjCInterfaceDecl* of the superclass we're targeting
(statically). Note that ObjCSuperExpr only has one remaining use in
the AST, which is for "super.prop" references.
The new representation of ObjCMessageExpr is 2 pointers smaller than
the old one, since it combines more storage. It also eliminates a leak
when we loaded message-send expressions from a precompiled header. The
representation also feels much cleaner to me; comments welcome!
This patch attempts to maintain the same semantics we previously had
with Objective-C message sends. In several places, there are massive
changes that boil down to simply replacing a nested-if structure such
as:
if (message has a receiver expression) {
// instance message
if (isa<ObjCSuperExpr>(...)) {
// send to super
} else {
// send to an object
}
} else {
// class message
if (name->isStr("super")) {
// class send to super
} else {
// send to class
}
}
with a switch
switch (E->getReceiverKind()) {
case ObjCMessageExpr::SuperInstance: ...
case ObjCMessageExpr::Instance: ...
case ObjCMessageExpr::SuperClass: ...
case ObjCMessageExpr::Class:...
}
There are quite a few places (particularly in the checkers) where
send-to-super is effectively ignored. I've placed FIXMEs in most of
them, and attempted to address send-to-super in a reasonable way. This
could use some review.
llvm-svn: 101972
2010-04-21 08:45:42 +08:00
|
|
|
isSuperMessage = true;
|
|
|
|
isClassMessage = true;
|
|
|
|
break;
|
2008-06-25 01:04:18 +08:00
|
|
|
}
|
|
|
|
|
2011-07-22 16:53:00 +08:00
|
|
|
if (retainSelf)
|
|
|
|
Receiver = EmitARCRetainNonBlock(Receiver);
|
|
|
|
|
|
|
|
// In ARC, we sometimes want to "extend the lifetime"
|
|
|
|
// (i.e. retain+autorelease) of receivers of returns-inner-pointer
|
|
|
|
// messages.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (getLangOpts().ObjCAutoRefCount && method &&
|
2011-07-22 16:53:00 +08:00
|
|
|
method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
|
|
|
|
shouldExtendReceiverForInnerPointerMessage(E))
|
|
|
|
Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
|
|
|
|
|
2014-01-26 00:55:45 +08:00
|
|
|
QualType ResultType = method ? method->getReturnType() : E->getType();
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2008-08-30 11:02:31 +08:00
|
|
|
CallArgList Args;
|
2017-03-06 13:28:22 +08:00
|
|
|
EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
// For delegate init calls in ARC, do an unsafe store of null into
|
|
|
|
// self. This represents the call taking direct ownership of that
|
|
|
|
// value. We have to do this after emitting the other call
|
|
|
|
// arguments because they might also reference self, but we don't
|
|
|
|
// have to worry about any of them modifying self because that would
|
|
|
|
// be an undefined read and write of an object in unordered
|
|
|
|
// expressions.
|
|
|
|
if (isDelegateInit) {
|
2012-03-11 15:00:24 +08:00
|
|
|
assert(getLangOpts().ObjCAutoRefCount &&
|
2011-06-16 07:02:42 +08:00
|
|
|
"delegate init calls should only be marked in ARC");
|
|
|
|
|
|
|
|
// Do an unsafe store of null into self.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do an alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address selfAddr =
|
|
|
|
GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
|
2011-06-16 07:02:42 +08:00
|
|
|
Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
|
|
|
|
}
|
2010-06-22 04:59:55 +08:00
|
|
|
|
2011-06-11 09:09:30 +08:00
|
|
|
RValue result;
|
2008-06-25 01:04:18 +08:00
|
|
|
if (isSuperMessage) {
|
2008-06-26 12:42:20 +08:00
|
|
|
// super is only valid in an Objective-C method
|
|
|
|
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
|
2009-03-01 04:07:56 +08:00
|
|
|
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
|
2011-06-11 09:09:30 +08:00
|
|
|
result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
|
|
|
|
E->getSelector(),
|
|
|
|
OMD->getClassInterface(),
|
|
|
|
isCategoryImpl,
|
|
|
|
Receiver,
|
|
|
|
isClassMessage,
|
|
|
|
Args,
|
2011-07-22 16:53:00 +08:00
|
|
|
method);
|
2011-06-11 09:09:30 +08:00
|
|
|
} else {
|
2016-03-22 04:50:03 +08:00
|
|
|
result = Runtime.GenerateMessageSend(*this, Return, ResultType,
|
|
|
|
E->getSelector(),
|
|
|
|
Receiver, Args, OID,
|
|
|
|
method);
|
2008-06-25 01:04:18 +08:00
|
|
|
}
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
// For delegate init calls in ARC, implicitly store the result of
|
|
|
|
// the call back into self. This takes ownership of the value.
|
|
|
|
if (isDelegateInit) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address selfAddr =
|
|
|
|
GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *newSelf = result.getScalarVal();
|
|
|
|
|
|
|
|
// The delegate return type isn't necessarily a matching type; in
|
|
|
|
// fact, it's quite likely to be 'id'.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Type *selfTy = selfAddr.getElementType();
|
2011-06-16 07:02:42 +08:00
|
|
|
newSelf = Builder.CreateBitCast(newSelf, selfTy);
|
|
|
|
|
|
|
|
Builder.CreateStore(newSelf, selfAddr);
|
|
|
|
}
|
2012-01-30 04:27:13 +08:00
|
|
|
|
Substitute type arguments into uses of Objective-C interface members.
When messaging a method that was defined in an Objective-C class (or
category or extension thereof) that has type parameters, substitute
the type arguments for those type parameters. Similarly, substitute
into property accesses, instance variables, and other references.
This includes general infrastructure for substituting the type
arguments associated with an ObjCObject(Pointer)Type into a type
referenced within a particular context, handling all of the
substitutions required to deal with (e.g.) inheritance involving
parameterized classes. In cases where no type arguments are available
(e.g., because we're messaging via some unspecialized type, id, etc.),
we substitute in the type bounds for the type parameters instead.
Example:
@interface NSSet<T : id<NSCopying>> : NSObject <NSCopying>
- (T)firstObject;
@end
void f(NSSet<NSString *> *stringSet, NSSet *anySet) {
[stringSet firstObject]; // produces NSString*
[anySet firstObject]; // produces id<NSCopying> (the bound)
}
When substituting for the type parameters given an unspecialized
context (i.e., no specific type arguments were given), substituting
the type bounds unconditionally produces type signatures that are too
strong compared to the pre-generics signatures. Instead, use the
following rule:
- In covariant positions, such as method return types, replace type
parameters with “id” or “Class” (the latter only when the type
parameter bound is “Class” or qualified class, e.g,
“Class<NSCopying>”)
- In other positions (e.g., parameter types), replace type
parameters with their type bounds.
- When a specialized Objective-C object or object pointer type
contains a type parameter in its type arguments (e.g.,
NSArray<T>*, but not NSArray<NSString *> *), replace the entire
object/object pointer type with its unspecialized version (e.g.,
NSArray *).
llvm-svn: 241543
2015-07-07 11:57:53 +08:00
|
|
|
return AdjustObjCObjectType(*this, E->getType(), result);
|
2007-08-22 01:43:55 +08:00
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
namespace {
|
2015-08-19 06:40:54 +08:00
|
|
|
struct FinishARCDealloc final : EHScopeStack::Cleanup {
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2011-06-16 07:02:42 +08:00
|
|
|
const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
|
2011-07-14 02:26:47 +08:00
|
|
|
|
|
|
|
const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
|
2011-06-16 07:02:42 +08:00
|
|
|
const ObjCInterfaceDecl *iface = impl->getClassInterface();
|
|
|
|
if (!iface->getSuperClass()) return;
|
|
|
|
|
2011-07-14 02:26:47 +08:00
|
|
|
bool isCategory = isa<ObjCCategoryImplDecl>(impl);
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
// Call [super dealloc] if we have a superclass.
|
|
|
|
llvm::Value *self = CGF.LoadObjCSelf();
|
|
|
|
|
|
|
|
CallArgList args;
|
|
|
|
CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
|
|
|
|
CGF.getContext().VoidTy,
|
|
|
|
method->getSelector(),
|
|
|
|
iface,
|
2011-07-14 02:26:47 +08:00
|
|
|
isCategory,
|
2011-06-16 07:02:42 +08:00
|
|
|
self,
|
|
|
|
/*is class msg*/ false,
|
|
|
|
args,
|
|
|
|
method);
|
|
|
|
}
|
|
|
|
};
|
2015-06-23 07:07:51 +08:00
|
|
|
}
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2008-08-26 16:29:31 +08:00
|
|
|
/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
|
|
|
|
/// the LLVM function and sets the other context used by
|
|
|
|
/// CodeGenFunction.
|
2009-01-11 05:06:09 +08:00
|
|
|
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
|
2015-01-14 08:04:42 +08:00
|
|
|
const ObjCContainerDecl *CD) {
|
2018-08-10 05:08:08 +08:00
|
|
|
SourceLocation StartLoc = OMD->getBeginLoc();
|
2011-03-09 12:27:21 +08:00
|
|
|
FunctionArgList args;
|
2010-04-06 05:09:15 +08:00
|
|
|
// Check if we should generate debug info for this method.
|
2013-08-27 04:33:21 +08:00
|
|
|
if (OMD->hasAttr<NoDebugAttr>())
|
2014-05-21 13:09:00 +08:00
|
|
|
DebugInfo = nullptr; // disable debug info indefinitely for this function
|
2010-04-06 05:09:15 +08:00
|
|
|
|
2009-01-11 05:06:09 +08:00
|
|
|
llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
|
2008-09-05 07:41:35 +08:00
|
|
|
|
2012-02-17 11:33:10 +08:00
|
|
|
const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
|
2009-04-17 08:48:04 +08:00
|
|
|
CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
|
2008-06-18 02:05:57 +08:00
|
|
|
|
2011-03-09 12:27:21 +08:00
|
|
|
args.push_back(OMD->getSelfDecl());
|
|
|
|
args.push_back(OMD->getCmdDecl());
|
2008-06-18 02:05:57 +08:00
|
|
|
|
2015-02-18 00:48:30 +08:00
|
|
|
args.append(OMD->param_begin(), OMD->param_end());
|
2008-08-16 11:19:19 +08:00
|
|
|
|
2011-01-14 02:57:25 +08:00
|
|
|
CurGD = OMD;
|
2015-01-14 15:10:46 +08:00
|
|
|
CurEHLocation = OMD->getLocEnd();
|
2011-01-14 02:57:25 +08:00
|
|
|
|
2014-04-11 07:21:53 +08:00
|
|
|
StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
|
|
|
|
OMD->getLocation(), StartLoc);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
// In ARC, certain methods get an extra cleanup.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (CGM.getLangOpts().ObjCAutoRefCount &&
|
2011-06-16 07:02:42 +08:00
|
|
|
OMD->isInstanceMethod() &&
|
|
|
|
OMD->getSelector().isUnarySelector()) {
|
2018-07-31 03:24:48 +08:00
|
|
|
const IdentifierInfo *ident =
|
2011-06-16 07:02:42 +08:00
|
|
|
OMD->getSelector().getIdentifierInfoForSlot(0);
|
|
|
|
if (ident->isStr("dealloc"))
|
|
|
|
EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
|
|
|
|
}
|
2008-08-26 16:29:31 +08:00
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
|
|
|
|
LValue lvalue, QualType type);
|
|
|
|
|
2008-08-26 16:29:31 +08:00
|
|
|
/// Generate an Objective-C method. An Objective-C method is a C function with
|
2009-09-09 23:08:12 +08:00
|
|
|
/// its pointer, name, and types registered in the class structure.
|
2008-08-26 16:29:31 +08:00
|
|
|
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
|
2015-01-14 08:04:42 +08:00
|
|
|
StartObjCMethod(OMD, OMD->getClassInterface());
|
2015-12-06 22:32:39 +08:00
|
|
|
PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
|
2014-01-08 06:05:55 +08:00
|
|
|
assert(isa<CompoundStmt>(OMD->getBody()));
|
2015-04-24 07:06:47 +08:00
|
|
|
incrementProfileCounter(OMD->getBody());
|
2014-01-08 06:05:55 +08:00
|
|
|
EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
|
2009-06-30 10:35:26 +08:00
|
|
|
FinishFunction(OMD->getBodyRBrace());
|
2008-08-26 16:29:31 +08:00
|
|
|
}
|
|
|
|
|
2011-09-13 07:06:44 +08:00
|
|
|
/// emitStructGetterCall - Call the runtime function to load a property
|
|
|
|
/// into the return value slot.
|
2018-07-31 03:24:48 +08:00
|
|
|
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
|
2011-09-13 07:06:44 +08:00
|
|
|
bool isAtomic, bool hasStrong) {
|
|
|
|
ASTContext &Context = CGF.getContext();
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address src =
|
|
|
|
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
|
|
|
|
.getAddress();
|
2011-09-13 07:06:44 +08:00
|
|
|
|
2018-07-31 03:24:48 +08:00
|
|
|
// objc_copyStruct (ReturnValue, &structIvar,
|
2011-09-13 07:06:44 +08:00
|
|
|
// sizeof (Type of Ivar), isAtomic, false);
|
|
|
|
CallArgList args;
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
|
|
|
|
args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);
|
2011-09-13 07:06:44 +08:00
|
|
|
|
|
|
|
src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);
|
2011-09-13 07:06:44 +08:00
|
|
|
|
|
|
|
CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
|
|
|
|
args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
|
|
|
|
args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
|
|
|
|
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
|
|
|
|
|
2016-10-27 07:46:34 +08:00
|
|
|
llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
|
|
|
|
CGCallee callee = CGCallee::forDirect(fn);
|
2016-03-11 12:30:31 +08:00
|
|
|
CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
|
2016-10-27 07:46:34 +08:00
|
|
|
callee, ReturnValueSlot(), args);
|
2011-09-13 07:06:44 +08:00
|
|
|
}
|
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
/// Determine whether the given architecture supports unaligned atomic
|
|
|
|
/// accesses. They don't have to be fast, just faster than a function
|
|
|
|
/// call and a mutex.
|
|
|
|
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
|
2011-09-14 04:48:30 +08:00
|
|
|
// FIXME: Allow unaligned atomic load/store on x86. (It is not
|
|
|
|
// currently supported by the backend.)
|
|
|
|
return 0;
|
2011-09-13 11:34:09 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the maximum size that permits atomic accesses for the given
|
|
|
|
/// architecture.
|
|
|
|
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
|
|
|
|
llvm::Triple::ArchType arch) {
|
|
|
|
// ARM has 8-byte atomic accesses, but it's not clear whether we
|
|
|
|
// want to rely on them here.
|
|
|
|
|
|
|
|
// In the default case, just assume that any size up to a pointer is
|
|
|
|
// fine given adequate alignment.
|
|
|
|
return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
class PropertyImplStrategy {
|
|
|
|
public:
|
|
|
|
enum StrategyKind {
|
|
|
|
/// The 'native' strategy is to use the architecture's provided
|
|
|
|
/// reads and writes.
|
|
|
|
Native,
|
|
|
|
|
|
|
|
/// Use objc_setProperty and objc_getProperty.
|
|
|
|
GetSetProperty,
|
|
|
|
|
|
|
|
/// Use objc_setProperty for the setter, but use expression
|
|
|
|
/// evaluation for the getter.
|
|
|
|
SetPropertyAndExpressionGet,
|
|
|
|
|
|
|
|
/// Use objc_copyStruct.
|
|
|
|
CopyStruct,
|
|
|
|
|
|
|
|
/// The 'expression' strategy is to emit normal assignment or
|
|
|
|
/// lvalue-to-rvalue expressions.
|
|
|
|
Expression
|
|
|
|
};
|
|
|
|
|
|
|
|
StrategyKind getKind() const { return StrategyKind(Kind); }
|
|
|
|
|
|
|
|
bool hasStrongMember() const { return HasStrong; }
|
|
|
|
bool isAtomic() const { return IsAtomic; }
|
|
|
|
bool isCopy() const { return IsCopy; }
|
|
|
|
|
|
|
|
CharUnits getIvarSize() const { return IvarSize; }
|
|
|
|
CharUnits getIvarAlignment() const { return IvarAlignment; }
|
|
|
|
|
|
|
|
PropertyImplStrategy(CodeGenModule &CGM,
|
|
|
|
const ObjCPropertyImplDecl *propImpl);
|
|
|
|
|
|
|
|
private:
|
|
|
|
unsigned Kind : 8;
|
|
|
|
unsigned IsAtomic : 1;
|
|
|
|
unsigned IsCopy : 1;
|
|
|
|
unsigned HasStrong : 1;
|
|
|
|
|
|
|
|
CharUnits IvarSize;
|
|
|
|
CharUnits IvarAlignment;
|
|
|
|
};
|
2015-06-23 07:07:51 +08:00
|
|
|
}
|
2011-09-13 11:34:09 +08:00
|
|
|
|
2012-07-23 16:59:39 +08:00
|
|
|
/// Pick an implementation strategy for the given property synthesis.
|
2011-09-13 11:34:09 +08:00
|
|
|
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
|
|
|
|
const ObjCPropertyImplDecl *propImpl) {
|
|
|
|
const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
|
2011-09-14 02:31:23 +08:00
|
|
|
ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
|
2011-09-13 11:34:09 +08:00
|
|
|
|
2011-09-14 02:31:23 +08:00
|
|
|
IsCopy = (setterKind == ObjCPropertyDecl::Copy);
|
|
|
|
IsAtomic = prop->isAtomic();
|
2011-09-13 11:34:09 +08:00
|
|
|
HasStrong = false; // doesn't matter here.
|
|
|
|
|
|
|
|
// Evaluate the ivar's size and alignment.
|
|
|
|
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
|
|
|
|
QualType ivarType = ivar->getType();
|
2014-03-02 21:01:17 +08:00
|
|
|
std::tie(IvarSize, IvarAlignment) =
|
|
|
|
CGM.getContext().getTypeInfoInChars(ivarType);
|
2011-09-13 11:34:09 +08:00
|
|
|
|
|
|
|
// If we have a copy property, we always have to use getProperty/setProperty.
|
2011-09-14 02:31:23 +08:00
|
|
|
// TODO: we could actually use setProperty and an expression for non-atomics.
|
2011-09-13 11:34:09 +08:00
|
|
|
if (IsCopy) {
|
|
|
|
Kind = GetSetProperty;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-09-14 02:31:23 +08:00
|
|
|
// Handle retain.
|
|
|
|
if (setterKind == ObjCPropertyDecl::Retain) {
|
2011-09-13 11:34:09 +08:00
|
|
|
// In GC-only, there's nothing special that needs to be done.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
|
2011-09-13 11:34:09 +08:00
|
|
|
// fallthrough
|
|
|
|
|
|
|
|
// In ARC, if the property is non-atomic, use expression emission,
|
|
|
|
// which translates to objc_storeStrong. This isn't required, but
|
|
|
|
// it's slightly nicer.
|
2012-03-11 15:00:24 +08:00
|
|
|
} else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
|
2012-08-21 07:36:59 +08:00
|
|
|
// Using standard expression emission for the setter is only
|
|
|
|
// acceptable if the ivar is __strong, which won't be true if
|
|
|
|
// the property is annotated with __attribute__((NSObject)).
|
|
|
|
// TODO: falling all the way back to objc_setProperty here is
|
|
|
|
// just laziness, though; we could still use objc_storeStrong
|
|
|
|
// if we hacked it right.
|
|
|
|
if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
|
|
|
|
Kind = Expression;
|
|
|
|
else
|
|
|
|
Kind = SetPropertyAndExpressionGet;
|
2011-09-13 11:34:09 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
// Otherwise, we need to at least use setProperty. However, if
|
|
|
|
// the property isn't atomic, we can use normal expression
|
|
|
|
// emission for the getter.
|
|
|
|
} else if (!IsAtomic) {
|
|
|
|
Kind = SetPropertyAndExpressionGet;
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Otherwise, we have to use both setProperty and getProperty.
|
|
|
|
} else {
|
|
|
|
Kind = GetSetProperty;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we're not atomic, just use expression accesses.
|
|
|
|
if (!IsAtomic) {
|
|
|
|
Kind = Expression;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-09-13 13:36:29 +08:00
|
|
|
// Properties on bitfield ivars need to be emitted using expression
|
|
|
|
// accesses even if they're nominally atomic.
|
|
|
|
if (ivar->isBitField()) {
|
|
|
|
Kind = Expression;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
// GC-qualified or ARC-qualified ivars need to be emitted as
|
|
|
|
// expressions. This actually works out to being atomic anyway,
|
|
|
|
// except for ARC __strong, but that should trigger the above code.
|
|
|
|
if (ivarType.hasNonTrivialObjCLifetime() ||
|
2012-03-11 15:00:24 +08:00
|
|
|
(CGM.getLangOpts().getGC() &&
|
2011-09-13 11:34:09 +08:00
|
|
|
CGM.getContext().getObjCGCAttrKind(ivarType))) {
|
|
|
|
Kind = Expression;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute whether the ivar has strong members.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (CGM.getLangOpts().getGC())
|
2011-09-13 11:34:09 +08:00
|
|
|
if (const RecordType *recordType = ivarType->getAs<RecordType>())
|
|
|
|
HasStrong = recordType->getDecl()->hasObjectMember();
|
|
|
|
|
|
|
|
// We can never access structs with object members with a native
|
|
|
|
// access, because we need to use write barriers. This is what
|
|
|
|
// objc_copyStruct is for.
|
|
|
|
if (HasStrong) {
|
|
|
|
Kind = CopyStruct;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, this is target-dependent and based on the size and
|
|
|
|
// alignment of the ivar.
|
2011-09-13 15:33:34 +08:00
|
|
|
|
|
|
|
// If the size of the ivar is not a power of two, give up. We don't
|
|
|
|
// want to get into the business of doing compare-and-swaps.
|
|
|
|
if (!IvarSize.isPowerOfTwo()) {
|
|
|
|
Kind = CopyStruct;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
llvm::Triple::ArchType arch =
|
2013-04-17 06:48:15 +08:00
|
|
|
CGM.getTarget().getTriple().getArch();
|
2011-09-13 11:34:09 +08:00
|
|
|
|
|
|
|
// Most architectures require memory to fit within a single cache
|
|
|
|
// line, so the alignment has to be at least the size of the access.
|
|
|
|
// Otherwise we have to grab a lock.
|
|
|
|
if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
|
|
|
|
Kind = CopyStruct;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the ivar's size exceeds the architecture's maximum atomic
|
|
|
|
// access size, we have to use CopyStruct.
|
|
|
|
if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
|
|
|
|
Kind = CopyStruct;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we can use native loads and stores.
|
|
|
|
Kind = Native;
|
|
|
|
}
|
2008-08-26 16:29:31 +08:00
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Generate an Objective-C property getter function.
|
2012-06-16 06:10:14 +08:00
|
|
|
///
|
|
|
|
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
|
2009-01-11 06:55:25 +08:00
|
|
|
/// is illegal within a category.
|
2008-12-10 04:23:04 +08:00
|
|
|
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
|
|
|
|
const ObjCPropertyImplDecl *PID) {
|
2014-10-15 00:43:46 +08:00
|
|
|
llvm::Constant *AtomicHelperFn =
|
|
|
|
CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
|
2008-08-26 16:29:31 +08:00
|
|
|
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
|
|
|
|
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
|
|
|
|
assert(OMD && "Invalid call to generate getter (empty method)");
|
2015-01-14 08:04:42 +08:00
|
|
|
StartObjCMethod(OMD, IMP->getClassInterface());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-05-30 03:56:01 +08:00
|
|
|
generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
|
2011-09-13 11:34:09 +08:00
|
|
|
|
|
|
|
FinishFunction();
|
|
|
|
}
|
|
|
|
|
2011-09-13 14:00:03 +08:00
|
|
|
static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
|
|
|
|
const Expr *getter = propImpl->getGetterCXXConstructor();
|
2011-09-13 11:34:09 +08:00
|
|
|
if (!getter) return true;
|
|
|
|
|
|
|
|
// Sema only makes only of these when the ivar has a C++ class type,
|
|
|
|
// so the form is pretty constrained.
|
|
|
|
|
2011-09-13 14:00:03 +08:00
|
|
|
// If the property has a reference type, we might just be binding a
|
|
|
|
// reference, in which case the result will be a gl-value. We should
|
|
|
|
// treat this as a non-trivial operation.
|
|
|
|
if (getter->isGLValue())
|
|
|
|
return false;
|
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
// If we selected a trivial copy-constructor, we're okay.
|
|
|
|
if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
|
|
|
|
return (construct->getConstructor()->isTrivial());
|
|
|
|
|
|
|
|
// The constructor might require cleanups (in which case it's never
|
|
|
|
// trivial).
|
|
|
|
assert(isa<ExprWithCleanups>(getter));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-07-31 03:24:48 +08:00
|
|
|
/// emitCPPObjectAtomicGetterCall - Call the runtime function to
|
2012-01-10 08:37:01 +08:00
|
|
|
/// copy the ivar into the resturn slot.
|
2018-07-31 03:24:48 +08:00
|
|
|
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
|
2012-01-10 08:37:01 +08:00
|
|
|
llvm::Value *returnAddr,
|
|
|
|
ObjCIvarDecl *ivar,
|
|
|
|
llvm::Constant *AtomicHelperFn) {
|
|
|
|
// objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
|
|
|
|
// AtomicHelperFn);
|
|
|
|
CallArgList args;
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2012-01-10 08:37:01 +08:00
|
|
|
// The 1st argument is the return Slot.
|
|
|
|
args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2012-01-10 08:37:01 +08:00
|
|
|
// The 2nd argument is the address of the ivar.
|
2018-07-31 03:24:48 +08:00
|
|
|
llvm::Value *ivarAddr =
|
|
|
|
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGF.LoadObjCSelf(), ivar, 0).getPointer();
|
2012-01-10 08:37:01 +08:00
|
|
|
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
|
|
|
|
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2012-01-10 08:37:01 +08:00
|
|
|
// Third argument is the helper function.
|
|
|
|
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
|
|
llvm::Constant *copyCppAtomicObjectFn =
|
2012-12-18 02:54:24 +08:00
|
|
|
CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
|
2016-10-27 07:46:34 +08:00
|
|
|
CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
|
2016-03-11 12:30:31 +08:00
|
|
|
CGF.EmitCall(
|
|
|
|
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
|
2016-10-27 07:46:34 +08:00
|
|
|
callee, ReturnValueSlot(), args);
|
2012-01-10 08:37:01 +08:00
|
|
|
}
|
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
/// Emit the body of a synthesized Objective-C property getter.
///
/// \param classImpl - the \@implementation the property belongs to
/// \param propImpl - the property implementation being synthesized
/// \param GetterMethodDecl - the getter method declaration; its return
///   type is used to bitcast the loaded scalar value below
/// \param AtomicHelperFn - the C++ atomic-copy helper emitted earlier,
///   or null if the property does not need one
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {
  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      // Emit the getter's C++ construct-expression as a return statement.
      ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
                     /*nrvo*/ nullptr);
      EmitReturnStmt(ret);
    }
    else {
      // With an atomic helper, defer to objc_copyCppObjectAtomic to copy
      // the ivar into the return slot.
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  // Native: load the ivar directly with an unordered atomic load.
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress();
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    llvm::Value *ivarVal = load;
    // If the ivar is wider than the return type, truncate the loaded
    // integer down to the return type's bit-width before storing.
    if (ivarSize > retTySize) {
      llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(load, newTy);
      bitcastType = newTy->getPointerTo();
    }
    Builder.CreateStore(ivarVal,
                        Builder.CreateBitCast(ReturnValue, bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  // GetSetProperty: go through the objc_getProperty runtime entry point.
  case PropertyImplStrategy::GetSetProperty: {
    llvm::Constant *getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();
    // Not every runtime provides objc_getProperty; report and bail if so.
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::Instruction *CallInstruction;
    RValue RV = EmitCall(
        getTypes().arrangeBuiltinFunctionCall(propType, args),
        callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  // CopyStruct: delegate to the objc_copyStruct-based helper.
  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  // Expression strategies: load the ivar directly, dispatching on how the
  // ivar's type is evaluated (complex / aggregate / scalar).
  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    switch (getEvaluationKind(ivarType)) {
    case TEK_Complex: {
      // Load the complex pair and store it into the return slot.
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate: {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
                        /* Src= */ LV, ivarType, overlapForReturnValue());
      return;
    }
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        // For a reference-typed property, return the ivar's address itself.
        value = LV.getAddress().getPointer();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress());
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
          AutoreleaseResult = false;
        }

        // Adjust the loaded value to the getter's declared return type.
        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}
|
|
|
|
|
2011-09-13 07:06:44 +08:00
|
|
|
/// emitStructSetterCall - Call the runtime function to store the value
|
|
|
|
/// from the first formal parameter into the given ivar.
|
|
|
|
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
|
|
|
|
ObjCIvarDecl *ivar) {
|
2018-07-31 03:24:48 +08:00
|
|
|
// objc_copyStruct (&structIvar, &Arg,
|
2011-02-19 03:15:13 +08:00
|
|
|
// sizeof (struct something), true, false);
|
2011-09-10 17:30:49 +08:00
|
|
|
CallArgList args;
|
|
|
|
|
|
|
|
// The first argument is the address of the ivar.
|
2011-09-13 07:06:44 +08:00
|
|
|
llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
|
|
|
|
CGF.LoadObjCSelf(), ivar, 0)
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
.getPointer();
|
2011-09-13 07:06:44 +08:00
|
|
|
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
|
|
|
|
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
|
2011-09-10 17:30:49 +08:00
|
|
|
|
|
|
|
// The second argument is the address of the parameter variable.
|
2011-09-13 07:06:44 +08:00
|
|
|
ParmVarDecl *argVar = *OMD->param_begin();
|
2018-07-31 03:24:48 +08:00
|
|
|
DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
|
2012-01-05 08:10:16 +08:00
|
|
|
VK_LValue, SourceLocation());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
|
2011-09-13 07:06:44 +08:00
|
|
|
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
|
|
|
|
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
|
2011-09-10 17:30:49 +08:00
|
|
|
|
|
|
|
// The third argument is the sizeof the type.
|
|
|
|
llvm::Value *size =
|
2011-09-13 07:06:44 +08:00
|
|
|
CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
|
|
|
|
args.add(RValue::get(size), CGF.getContext().getSizeType());
|
|
|
|
|
|
|
|
// The fourth argument is the 'isAtomic' flag.
|
|
|
|
args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
|
2011-09-10 17:30:49 +08:00
|
|
|
|
2011-09-13 07:06:44 +08:00
|
|
|
// The fifth argument is the 'hasStrong' flag.
|
|
|
|
// FIXME: should this really always be false?
|
|
|
|
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
|
2011-09-10 17:30:49 +08:00
|
|
|
|
2016-10-27 07:46:34 +08:00
|
|
|
llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
|
|
|
|
CGCallee callee = CGCallee::forDirect(fn);
|
2016-03-11 12:30:31 +08:00
|
|
|
CGF.EmitCall(
|
|
|
|
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
|
2016-10-27 07:46:34 +08:00
|
|
|
callee, ReturnValueSlot(), args);
|
2011-02-19 03:15:13 +08:00
|
|
|
}
|
|
|
|
|
2018-07-31 03:24:48 +08:00
|
|
|
/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
|
|
|
|
/// the value from the first formal parameter into the given ivar, using
|
2012-01-07 06:33:54 +08:00
|
|
|
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
|
2018-07-31 03:24:48 +08:00
|
|
|
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
|
2012-01-07 06:33:54 +08:00
|
|
|
ObjCMethodDecl *OMD,
|
|
|
|
ObjCIvarDecl *ivar,
|
|
|
|
llvm::Constant *AtomicHelperFn) {
|
2018-07-31 03:24:48 +08:00
|
|
|
// objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
|
2012-01-07 06:33:54 +08:00
|
|
|
// AtomicHelperFn);
|
|
|
|
CallArgList args;
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2012-01-07 06:33:54 +08:00
|
|
|
// The first argument is the address of the ivar.
|
2018-07-31 03:24:48 +08:00
|
|
|
llvm::Value *ivarAddr =
|
|
|
|
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGF.LoadObjCSelf(), ivar, 0).getPointer();
|
2012-01-07 06:33:54 +08:00
|
|
|
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
|
|
|
|
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2012-01-07 06:33:54 +08:00
|
|
|
// The second argument is the address of the parameter variable.
|
|
|
|
ParmVarDecl *argVar = *OMD->param_begin();
|
2018-07-31 03:24:48 +08:00
|
|
|
DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
|
2012-01-07 06:33:54 +08:00
|
|
|
VK_LValue, SourceLocation());
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
|
2012-01-07 06:33:54 +08:00
|
|
|
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
|
|
|
|
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2012-01-07 06:33:54 +08:00
|
|
|
// Third argument is the helper function.
|
|
|
|
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
|
|
|
llvm::Constant *fn =
|
2012-12-18 02:54:24 +08:00
|
|
|
CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
|
2016-10-27 07:46:34 +08:00
|
|
|
CGCallee callee = CGCallee::forDirect(fn);
|
2016-03-11 12:30:31 +08:00
|
|
|
CGF.EmitCall(
|
|
|
|
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
|
2016-10-27 07:46:34 +08:00
|
|
|
callee, ReturnValueSlot(), args);
|
2012-01-07 06:33:54 +08:00
|
|
|
}
|
|
|
|
|
2012-01-10 08:37:01 +08:00
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
|
|
|
|
Expr *setter = PID->getSetterCXXAssignment();
|
|
|
|
if (!setter) return true;
|
|
|
|
|
|
|
|
// Sema only makes only of these when the ivar has a C++ class type,
|
|
|
|
// so the form is pretty constrained.
|
2011-09-10 17:17:20 +08:00
|
|
|
|
|
|
|
// An operator call is trivial if the function it calls is trivial.
|
2011-09-13 11:34:09 +08:00
|
|
|
// This also implies that there's nothing non-trivial going on with
|
|
|
|
// the arguments, because operator= can only be trivial if it's a
|
|
|
|
// synthesized assignment operator and therefore both parameters are
|
|
|
|
// references.
|
|
|
|
if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
|
2011-09-10 17:17:20 +08:00
|
|
|
if (const FunctionDecl *callee
|
|
|
|
= dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
|
|
|
|
if (callee->isTrivial())
|
|
|
|
return true;
|
|
|
|
return false;
|
2011-04-07 00:05:26 +08:00
|
|
|
}
|
2011-09-10 17:17:20 +08:00
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
assert(isa<ExprWithCleanups>(setter));
|
2011-09-10 17:17:20 +08:00
|
|
|
return false;
|
|
|
|
}
|
2008-09-24 14:32:09 +08:00
|
|
|
|
2012-03-11 04:38:56 +08:00
|
|
|
/// Whether the target runtime provides the optimized objc_setProperty_*
/// entry points. They exist only when GC is off (10.8 / iOS 6.0 runtimes).
static bool UseOptimizedSetter(CodeGenModule &CGM) {
  const LangOptions &opts = CGM.getLangOpts();
  return opts.getGC() == LangOptions::NonGC &&
         opts.ObjCRuntime.hasOptimizedSetter();
}
|
|
|
|
|
2011-09-10 17:17:20 +08:00
|
|
|
void
|
|
|
|
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
|
2012-01-07 06:33:54 +08:00
|
|
|
const ObjCPropertyImplDecl *propImpl,
|
|
|
|
llvm::Constant *AtomicHelperFn) {
|
|
|
|
const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
|
|
|
|
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
|
|
|
|
ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2011-09-10 17:17:20 +08:00
|
|
|
// Just use the setter expression if Sema gave us one and it's
|
2012-01-07 06:33:54 +08:00
|
|
|
// non-trivial.
|
2011-09-13 11:34:09 +08:00
|
|
|
if (!hasTrivialSetExpr(propImpl)) {
|
2012-01-07 06:33:54 +08:00
|
|
|
if (!AtomicHelperFn)
|
|
|
|
// If non-atomic, assignment is called directly.
|
|
|
|
EmitStmt(propImpl->getSetterCXXAssignment());
|
|
|
|
else
|
|
|
|
// If atomic, assignment is called via a locking api.
|
|
|
|
emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
|
|
|
|
AtomicHelperFn);
|
2011-09-10 17:17:20 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
PropertyImplStrategy strategy(CGM, propImpl);
|
|
|
|
switch (strategy.getKind()) {
|
|
|
|
case PropertyImplStrategy::Native: {
|
2012-10-27 06:38:05 +08:00
|
|
|
// We don't need to do anything for a zero-size struct.
|
|
|
|
if (strategy.getIvarSize().isZero())
|
|
|
|
return;
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
|
2011-09-10 17:17:20 +08:00
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
LValue ivarLValue =
|
|
|
|
EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address ivarAddr = ivarLValue.getAddress();
|
2011-09-13 11:34:09 +08:00
|
|
|
|
|
|
|
// Currently, all atomic accesses have to be through integer
|
|
|
|
// types, so there's no point in trying to pick a prettier type.
|
|
|
|
llvm::Type *bitcastType =
|
|
|
|
llvm::Type::getIntNTy(getLLVMContext(),
|
|
|
|
getContext().toBits(strategy.getIvarSize()));
|
|
|
|
|
|
|
|
// Cast both arguments to the chosen operation type.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
|
|
|
|
ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
|
2011-09-10 17:17:20 +08:00
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
// This bitcast load is likely to cause some nasty IR.
|
|
|
|
llvm::Value *load = Builder.CreateLoad(argAddr);
|
|
|
|
|
|
|
|
// Perform an atomic store. There are no memory ordering requirements.
|
|
|
|
llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
|
2016-04-07 01:26:42 +08:00
|
|
|
store->setAtomic(llvm::AtomicOrdering::Unordered);
|
2011-09-13 11:34:09 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
case PropertyImplStrategy::GetSetProperty:
|
|
|
|
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
|
2014-05-21 13:09:00 +08:00
|
|
|
|
2016-10-27 07:46:34 +08:00
|
|
|
llvm::Constant *setOptimizedPropertyFn = nullptr;
|
|
|
|
llvm::Constant *setPropertyFn = nullptr;
|
2012-03-07 04:05:56 +08:00
|
|
|
if (UseOptimizedSetter(CGM)) {
|
2012-10-16 06:23:53 +08:00
|
|
|
// 10.8 and iOS 6.0 code and GC is off
|
2018-07-31 03:24:48 +08:00
|
|
|
setOptimizedPropertyFn =
|
2012-03-30 01:31:31 +08:00
|
|
|
CGM.getObjCRuntime()
|
|
|
|
.GetOptimizedPropertySetFunction(strategy.isAtomic(),
|
|
|
|
strategy.isCopy());
|
2012-03-07 04:05:56 +08:00
|
|
|
if (!setOptimizedPropertyFn) {
|
|
|
|
CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
|
|
|
|
return;
|
|
|
|
}
|
2008-09-24 14:32:09 +08:00
|
|
|
}
|
2012-03-07 04:05:56 +08:00
|
|
|
else {
|
|
|
|
setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
|
|
|
|
if (!setPropertyFn) {
|
|
|
|
CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
// Emit objc_setProperty((id) self, _cmd, offset, arg,
|
2008-09-24 14:32:09 +08:00
|
|
|
// <is-atomic>, <is-copy>).
|
2011-09-10 17:17:20 +08:00
|
|
|
llvm::Value *cmd =
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
|
2011-09-10 17:17:20 +08:00
|
|
|
llvm::Value *self =
|
|
|
|
Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
|
|
|
|
llvm::Value *ivarOffset =
|
|
|
|
EmitIvarOffset(classImpl->getClassInterface(), ivar);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
|
|
|
|
llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
|
|
|
|
arg = Builder.CreateBitCast(arg, VoidPtrTy);
|
2011-09-10 17:17:20 +08:00
|
|
|
|
|
|
|
CallArgList args;
|
|
|
|
args.add(RValue::get(self), getContext().getObjCIdType());
|
|
|
|
args.add(RValue::get(cmd), getContext().getObjCSelType());
|
2012-03-07 04:05:56 +08:00
|
|
|
if (setOptimizedPropertyFn) {
|
|
|
|
args.add(RValue::get(arg), getContext().getObjCIdType());
|
|
|
|
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
|
2016-10-27 07:46:34 +08:00
|
|
|
CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
|
2016-03-11 12:30:31 +08:00
|
|
|
EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
|
2016-10-27 07:46:34 +08:00
|
|
|
callee, ReturnValueSlot(), args);
|
2012-03-07 04:05:56 +08:00
|
|
|
} else {
|
|
|
|
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
|
|
|
|
args.add(RValue::get(arg), getContext().getObjCIdType());
|
|
|
|
args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
|
|
|
|
getContext().BoolTy);
|
|
|
|
args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
|
|
|
|
getContext().BoolTy);
|
|
|
|
// FIXME: We shouldn't need to get the function info here, the runtime
|
|
|
|
// already should have computed it to build the function.
|
2016-10-27 07:46:34 +08:00
|
|
|
CGCallee callee = CGCallee::forDirect(setPropertyFn);
|
2016-03-11 12:30:31 +08:00
|
|
|
EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
|
2016-10-27 07:46:34 +08:00
|
|
|
callee, ReturnValueSlot(), args);
|
2012-03-07 04:05:56 +08:00
|
|
|
}
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2011-09-10 17:17:20 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-09-13 11:34:09 +08:00
|
|
|
case PropertyImplStrategy::CopyStruct:
|
2011-09-13 07:06:44 +08:00
|
|
|
emitStructSetterCall(*this, setterMethod, ivar);
|
2011-09-10 17:17:20 +08:00
|
|
|
return;
|
2011-09-13 11:34:09 +08:00
|
|
|
|
|
|
|
case PropertyImplStrategy::Expression:
|
|
|
|
break;
|
2011-09-10 17:17:20 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, fake up some ASTs and emit a normal assignment.
|
|
|
|
ValueDecl *selfDecl = setterMethod->getSelfDecl();
|
2012-03-10 17:33:50 +08:00
|
|
|
DeclRefExpr self(selfDecl, false, selfDecl->getType(),
|
|
|
|
VK_LValue, SourceLocation());
|
2011-09-10 17:17:20 +08:00
|
|
|
ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
|
|
|
|
selfDecl->getType(), CK_LValueToRValue, &self,
|
|
|
|
VK_RValue);
|
|
|
|
ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
|
2013-04-03 02:57:54 +08:00
|
|
|
SourceLocation(), SourceLocation(),
|
|
|
|
&selfLoad, true, true);
|
2011-09-10 17:17:20 +08:00
|
|
|
|
|
|
|
ParmVarDecl *argDecl = *setterMethod->param_begin();
|
|
|
|
QualType argType = argDecl->getType().getNonReferenceType();
|
2012-03-10 17:33:50 +08:00
|
|
|
DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
|
2011-09-10 17:17:20 +08:00
|
|
|
ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
|
|
|
|
argType.getUnqualifiedType(), CK_LValueToRValue,
|
|
|
|
&arg, VK_RValue);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2011-09-10 17:17:20 +08:00
|
|
|
// The property type can differ from the ivar type in some situations with
|
|
|
|
// Objective-C pointer types, we can always bit cast the RHS in these cases.
|
|
|
|
// The following absurdity is just to ensure well-formed IR.
|
|
|
|
CastKind argCK = CK_NoOp;
|
|
|
|
if (ivarRef.getType()->isObjCObjectPointerType()) {
|
|
|
|
if (argLoad.getType()->isObjCObjectPointerType())
|
|
|
|
argCK = CK_BitCast;
|
|
|
|
else if (argLoad.getType()->isBlockPointerType())
|
|
|
|
argCK = CK_BlockPointerToObjCPointerCast;
|
|
|
|
else
|
|
|
|
argCK = CK_CPointerToObjCPointerCast;
|
|
|
|
} else if (ivarRef.getType()->isBlockPointerType()) {
|
|
|
|
if (argLoad.getType()->isBlockPointerType())
|
|
|
|
argCK = CK_BitCast;
|
|
|
|
else
|
|
|
|
argCK = CK_AnyPointerToBlockPointerCast;
|
|
|
|
} else if (ivarRef.getType()->isPointerType()) {
|
|
|
|
argCK = CK_BitCast;
|
2008-09-24 14:32:09 +08:00
|
|
|
}
|
2011-09-10 17:17:20 +08:00
|
|
|
ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
|
|
|
|
ivarRef.getType(), argCK, &argLoad,
|
|
|
|
VK_RValue);
|
|
|
|
Expr *finalArg = &argLoad;
|
|
|
|
if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
|
|
|
|
argLoad.getType()))
|
|
|
|
finalArg = &argCast;
|
|
|
|
|
|
|
|
|
|
|
|
BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
|
|
|
|
ivarRef.getType(), VK_RValue, OK_Ordinary,
|
2017-03-28 03:17:25 +08:00
|
|
|
SourceLocation(), FPOptions());
|
2011-09-10 17:17:20 +08:00
|
|
|
EmitStmt(&assign);
|
|
|
|
}
|
|
|
|
|
2018-05-09 09:00:01 +08:00
|
|
|
/// Generate an Objective-C property setter function.
|
2012-06-16 06:10:14 +08:00
|
|
|
///
|
|
|
|
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
|
2011-09-10 17:17:20 +08:00
|
|
|
/// is illegal within a category.
|
|
|
|
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
|
|
|
|
const ObjCPropertyImplDecl *PID) {
|
2014-10-15 00:43:46 +08:00
|
|
|
llvm::Constant *AtomicHelperFn =
|
|
|
|
CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
|
2011-09-10 17:17:20 +08:00
|
|
|
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
|
|
|
|
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
|
|
|
|
assert(OMD && "Invalid call to generate setter (empty method)");
|
2015-01-14 08:04:42 +08:00
|
|
|
StartObjCMethod(OMD, IMP->getClassInterface());
|
2011-09-10 17:17:20 +08:00
|
|
|
|
2012-01-07 06:33:54 +08:00
|
|
|
generateObjCSetterBody(IMP, PID, AtomicHelperFn);
|
2008-08-26 16:29:31 +08:00
|
|
|
|
|
|
|
FinishFunction();
|
2008-06-18 02:05:57 +08:00
|
|
|
}
|
|
|
|
|
2011-03-22 15:05:39 +08:00
|
|
|
namespace {
|
2015-08-19 06:40:54 +08:00
|
|
|
struct DestroyIvar final : EHScopeStack::Cleanup {
|
2011-07-13 00:41:08 +08:00
|
|
|
private:
|
|
|
|
llvm::Value *addr;
|
2011-03-22 15:05:39 +08:00
|
|
|
const ObjCIvarDecl *ivar;
|
2012-01-26 11:33:36 +08:00
|
|
|
CodeGenFunction::Destroyer *destroyer;
|
2011-07-13 00:41:08 +08:00
|
|
|
bool useEHCleanupForArray;
|
|
|
|
public:
|
|
|
|
DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
|
|
|
|
CodeGenFunction::Destroyer *destroyer,
|
|
|
|
bool useEHCleanupForArray)
|
2012-01-26 11:33:36 +08:00
|
|
|
: addr(addr), ivar(ivar), destroyer(destroyer),
|
2011-07-13 00:41:08 +08:00
|
|
|
useEHCleanupForArray(useEHCleanupForArray) {}
|
2011-03-22 15:05:39 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2011-07-13 00:41:08 +08:00
|
|
|
LValue lvalue
|
|
|
|
= CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
|
|
|
|
CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
|
2011-07-13 04:27:29 +08:00
|
|
|
flags.isForNormalCleanup() && useEHCleanupForArray);
|
2011-03-22 15:05:39 +08:00
|
|
|
}
|
|
|
|
};
|
2015-06-23 07:07:51 +08:00
|
|
|
}
|
2011-03-22 15:05:39 +08:00
|
|
|
|
2011-07-13 00:41:08 +08:00
|
|
|
/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
|
|
|
|
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address addr,
|
2011-07-13 00:41:08 +08:00
|
|
|
QualType type) {
|
|
|
|
llvm::Value *null = getNullForVariable(addr);
|
|
|
|
CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
|
|
|
|
}
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2011-03-22 15:05:39 +08:00
|
|
|
static void emitCXXDestructMethod(CodeGenFunction &CGF,
|
|
|
|
ObjCImplementationDecl *impl) {
|
|
|
|
CodeGenFunction::RunCleanupsScope scope(CGF);
|
|
|
|
|
|
|
|
llvm::Value *self = CGF.LoadObjCSelf();
|
|
|
|
|
2011-07-22 10:08:32 +08:00
|
|
|
const ObjCInterfaceDecl *iface = impl->getClassInterface();
|
|
|
|
for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
|
2011-03-22 15:05:39 +08:00
|
|
|
ivar; ivar = ivar->getNextIvar()) {
|
|
|
|
QualType type = ivar->getType();
|
|
|
|
|
|
|
|
// Check whether the ivar is a destructible type.
|
2011-07-13 00:41:08 +08:00
|
|
|
QualType::DestructionKind dtorKind = type.isDestructedType();
|
|
|
|
if (!dtorKind) continue;
|
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
CodeGenFunction::Destroyer *destroyer = nullptr;
|
2011-07-13 00:41:08 +08:00
|
|
|
|
|
|
|
// Use a call to objc_storeStrong to destroy strong ivars, for the
|
|
|
|
// general benefit of the tools.
|
|
|
|
if (dtorKind == QualType::DK_objc_strong_lifetime) {
|
2012-01-26 11:33:36 +08:00
|
|
|
destroyer = destroyARCStrongWithStore;
|
2011-07-13 00:41:08 +08:00
|
|
|
|
|
|
|
// Otherwise use the default for the destruction kind.
|
|
|
|
} else {
|
2012-01-26 11:33:36 +08:00
|
|
|
destroyer = CGF.getDestroyer(dtorKind);
|
2011-03-22 15:05:39 +08:00
|
|
|
}
|
2011-07-13 00:41:08 +08:00
|
|
|
|
|
|
|
CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
|
|
|
|
|
|
|
|
CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
|
|
|
|
cleanupKind & EHCleanup);
|
2011-03-22 15:05:39 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
|
|
|
|
}
|
|
|
|
|
2010-04-29 05:28:56 +08:00
|
|
|
void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
|
|
|
|
ObjCMethodDecl *MD,
|
|
|
|
bool ctor) {
|
|
|
|
MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
|
2015-01-14 08:04:42 +08:00
|
|
|
StartObjCMethod(MD, IMP->getClassInterface());
|
2011-03-22 15:05:39 +08:00
|
|
|
|
|
|
|
// Emit .cxx_construct.
|
2010-04-29 05:28:56 +08:00
|
|
|
if (ctor) {
|
2011-06-16 07:02:42 +08:00
|
|
|
// Suppress the final autorelease in ARC.
|
|
|
|
AutoreleaseResult = false;
|
|
|
|
|
2014-03-14 01:35:02 +08:00
|
|
|
for (const auto *IvarInit : IMP->inits()) {
|
2010-12-04 17:14:42 +08:00
|
|
|
FieldDecl *Field = IvarInit->getAnyMember();
|
2014-03-14 01:35:02 +08:00
|
|
|
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
|
2018-07-31 03:24:48 +08:00
|
|
|
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
|
2010-04-29 06:30:33 +08:00
|
|
|
LoadObjCSelf(), Ivar, 0);
|
2011-08-26 04:40:09 +08:00
|
|
|
EmitAggExpr(IvarInit->getInit(),
|
|
|
|
AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
|
2011-08-26 07:04:34 +08:00
|
|
|
AggValueSlot::DoesNotNeedGCBarriers,
|
2018-04-06 04:52:58 +08:00
|
|
|
AggValueSlot::IsNotAliased,
|
|
|
|
AggValueSlot::DoesNotOverlap));
|
2010-04-29 05:28:56 +08:00
|
|
|
}
|
|
|
|
// constructor returns 'self'.
|
|
|
|
CodeGenTypes &Types = CGM.getTypes();
|
|
|
|
QualType IdTy(CGM.getContext().getObjCIdType());
|
|
|
|
llvm::Value *SelfAsId =
|
|
|
|
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
|
|
|
|
EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
|
2011-03-22 15:05:39 +08:00
|
|
|
|
|
|
|
// Emit .cxx_destruct.
|
2010-05-06 08:20:39 +08:00
|
|
|
} else {
|
2011-03-22 15:05:39 +08:00
|
|
|
emitCXXDestructMethod(*this, IMP);
|
2010-04-29 05:28:56 +08:00
|
|
|
}
|
|
|
|
FinishFunction();
|
|
|
|
}
|
|
|
|
|
2008-09-24 12:04:31 +08:00
|
|
|
llvm::Value *CodeGenFunction::LoadObjCSelf() {
|
2013-05-03 15:33:41 +08:00
|
|
|
VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
|
|
|
|
DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
|
|
|
|
Self->getType(), VK_LValue, SourceLocation());
|
2013-10-02 10:29:49 +08:00
|
|
|
return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
|
2008-06-18 02:05:57 +08:00
|
|
|
}
|
|
|
|
|
2009-02-03 08:09:52 +08:00
|
|
|
QualType CodeGenFunction::TypeOfSelfObject() {
|
|
|
|
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
|
|
|
|
ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
|
2009-07-11 07:34:53 +08:00
|
|
|
const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
|
|
|
|
getContext().getCanonicalType(selfDecl->getType()));
|
2009-02-03 08:09:52 +08:00
|
|
|
return PTy->getPointeeType();
|
|
|
|
}
|
|
|
|
|
2009-03-23 05:03:39 +08:00
|
|
|
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
|
2016-10-27 07:46:34 +08:00
|
|
|
llvm::Constant *EnumerationMutationFnPtr =
|
2008-09-24 12:04:31 +08:00
|
|
|
CGM.getObjCRuntime().EnumerationMutationFunction();
|
2016-10-27 07:46:34 +08:00
|
|
|
if (!EnumerationMutationFnPtr) {
|
2008-09-24 12:04:31 +08:00
|
|
|
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
|
|
|
|
return;
|
|
|
|
}
|
2016-10-27 07:46:34 +08:00
|
|
|
CGCallee EnumerationMutationFn =
|
|
|
|
CGCallee::forDirect(EnumerationMutationFnPtr);
|
2008-09-24 12:04:31 +08:00
|
|
|
|
2011-01-19 09:36:36 +08:00
|
|
|
CGDebugInfo *DI = getDebugInfo();
|
2011-10-14 05:45:18 +08:00
|
|
|
if (DI)
|
|
|
|
DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
|
2011-01-19 09:36:36 +08:00
|
|
|
|
2017-04-15 00:53:25 +08:00
|
|
|
RunCleanupsScope ForScope(*this);
|
|
|
|
|
2017-04-14 09:00:03 +08:00
|
|
|
// The local variable comes into scope immediately.
|
|
|
|
AutoVarEmission variable = AutoVarEmission::invalid();
|
|
|
|
if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
|
|
|
|
variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
|
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-08-31 10:33:12 +08:00
|
|
|
// Fast enumeration state.
|
2011-08-10 01:23:49 +08:00
|
|
|
QualType StateTy = CGM.getObjCFastEnumerationStateType();
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
|
2010-05-23 01:35:42 +08:00
|
|
|
EmitNullInitialization(StatePtr, StateTy);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-08-31 10:33:12 +08:00
|
|
|
// Number of elements in the items array.
|
2008-08-31 12:05:03 +08:00
|
|
|
static const unsigned NumItems = 16;
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Fetch the countByEnumeratingWithState:objects:count: selector.
|
2010-03-30 19:36:44 +08:00
|
|
|
IdentifierInfo *II[] = {
|
|
|
|
&CGM.getContext().Idents.get("countByEnumeratingWithState"),
|
|
|
|
&CGM.getContext().Idents.get("objects"),
|
|
|
|
&CGM.getContext().Idents.get("count")
|
|
|
|
};
|
|
|
|
Selector FastEnumSel =
|
|
|
|
CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
|
2008-08-31 10:33:12 +08:00
|
|
|
|
|
|
|
QualType ItemsTy =
|
|
|
|
getContext().getConstantArrayType(getContext().getObjCIdType(),
|
2009-09-09 23:08:12 +08:00
|
|
|
llvm::APInt(32, NumItems),
|
2008-08-31 10:33:12 +08:00
|
|
|
ArrayType::Normal, 0);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-07-27 09:07:15 +08:00
|
|
|
// Emit the collection pointer. In ARC, we do a retain.
|
|
|
|
llvm::Value *Collection;
|
2012-03-11 15:00:24 +08:00
|
|
|
if (getLangOpts().ObjCAutoRefCount) {
|
2011-07-27 09:07:15 +08:00
|
|
|
Collection = EmitARCRetainScalarExpr(S.getCollection());
|
|
|
|
|
|
|
|
// Enter a cleanup to do the release.
|
|
|
|
EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
|
|
|
|
} else {
|
|
|
|
Collection = EmitScalarExpr(S.getCollection());
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-08-05 08:14:38 +08:00
|
|
|
// The 'continue' label needs to appear within the cleanup for the
|
|
|
|
// collection object.
|
|
|
|
JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
|
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Send it our message:
|
2008-08-31 10:33:12 +08:00
|
|
|
CallArgList Args;
|
2011-01-07 09:49:06 +08:00
|
|
|
|
|
|
|
// The first argument is a temporary of the enumeration-state type.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Args.add(RValue::get(StatePtr.getPointer()),
|
|
|
|
getContext().getPointerType(StateTy));
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// The second argument is a temporary array with space for NumItems
|
|
|
|
// pointers. We'll actually be loading elements from the array
|
|
|
|
// pointer written into the control state; this buffer is so that
|
|
|
|
// collections that *aren't* backed by arrays can still queue up
|
|
|
|
// batches of elements.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Args.add(RValue::get(ItemsPtr.getPointer()),
|
|
|
|
getContext().getPointerType(ItemsTy));
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// The third argument is the capacity of that temporary array.
|
2017-09-09 07:41:17 +08:00
|
|
|
llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType());
|
|
|
|
llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems);
|
|
|
|
Args.add(RValue::get(Count), getContext().getNSUIntegerType());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Start the enumeration.
|
2009-09-09 23:08:12 +08:00
|
|
|
RValue CountRV =
|
2017-09-09 07:41:17 +08:00
|
|
|
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
|
|
|
|
getContext().getNSUIntegerType(),
|
|
|
|
FastEnumSel, Collection, Args);
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// The initial number of objects that were returned in the buffer.
|
|
|
|
llvm::Value *initialBufferLimit = CountRV.getScalarVal();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
|
|
|
|
llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2017-09-09 07:41:17 +08:00
|
|
|
llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy);
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// If the limit pointer was zero to begin with, the collection is
|
2014-03-26 07:26:31 +08:00
|
|
|
// empty; skip all this. Set the branch weight assuming this has the same
|
|
|
|
// probability of exiting the loop as any other loop exit.
|
2015-04-24 07:06:47 +08:00
|
|
|
uint64_t EntryCount = getCurrentProfileCount();
|
|
|
|
Builder.CreateCondBr(
|
|
|
|
Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB,
|
|
|
|
LoopInitBB,
|
2015-05-02 13:00:55 +08:00
|
|
|
createProfileWeights(EntryCount, getProfileCount(S.getBody())));
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Otherwise, initialize the loop.
|
|
|
|
EmitBlock(LoopInitBB);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Save the initial mutations value. This is the value at an
|
|
|
|
// address that was written into the state object by
|
|
|
|
// countByEnumeratingWithState:objects:count:.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address StateMutationsPtrPtr = Builder.CreateStructGEP(
|
|
|
|
StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr");
|
|
|
|
llvm::Value *StateMutationsPtr
|
|
|
|
= Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
llvm::Value *initialMutations =
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
|
|
|
|
"forcoll.initial-mutations");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Start looping. This is the point we return to whenever we have a
|
|
|
|
// fresh, non-empty batch of objects.
|
|
|
|
llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
|
|
|
|
EmitBlock(LoopBodyBB);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// The current index into the buffer.
|
2017-09-09 07:41:17 +08:00
|
|
|
llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index");
|
2011-01-07 09:49:06 +08:00
|
|
|
index->addIncoming(zero, LoopInitBB);
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// The current buffer size.
|
2017-09-09 07:41:17 +08:00
|
|
|
llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count");
|
2011-01-07 09:49:06 +08:00
|
|
|
count->addIncoming(initialBufferLimit, LoopInitBB);
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2015-04-24 07:06:47 +08:00
|
|
|
incrementProfileCounter(&S);
|
2014-02-24 09:13:09 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Check whether the mutations value has changed from where it was
|
|
|
|
// at start. StateMutationsPtr should actually be invariant between
|
|
|
|
// refreshes.
|
2008-08-31 12:05:03 +08:00
|
|
|
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
|
2011-01-07 09:49:06 +08:00
|
|
|
llvm::Value *currentMutations
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
= Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
|
|
|
|
"statemutations");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
|
2011-03-03 06:39:34 +08:00
|
|
|
llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
|
|
|
|
WasNotMutatedBB, WasMutatedBB);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// If so, call the enumeration-mutation function.
|
|
|
|
EmitBlock(WasMutatedBB);
|
2008-08-31 12:05:03 +08:00
|
|
|
llvm::Value *V =
|
2009-09-09 23:08:12 +08:00
|
|
|
Builder.CreateBitCast(Collection,
|
2011-09-28 05:06:10 +08:00
|
|
|
ConvertType(getContext().getObjCIdType()));
|
2009-02-04 07:55:40 +08:00
|
|
|
CallArgList Args2;
|
2011-05-03 01:57:46 +08:00
|
|
|
Args2.add(RValue::get(V), getContext().getObjCIdType());
|
2009-05-16 15:57:57 +08:00
|
|
|
// FIXME: We shouldn't need to get the function info here, the runtime already
|
|
|
|
// should have computed it to build the function.
|
2016-03-11 12:30:31 +08:00
|
|
|
EmitCall(
|
|
|
|
CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2),
|
2009-12-25 03:25:24 +08:00
|
|
|
EnumerationMutationFn, ReturnValueSlot(), Args2);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Otherwise, or if the mutation function returns, just continue.
|
|
|
|
EmitBlock(WasNotMutatedBB);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Initialize the element variable.
|
|
|
|
RunCleanupsScope elementVariableScope(*this);
|
2011-02-22 15:16:58 +08:00
|
|
|
bool elementIsVariable;
|
2011-01-07 09:49:06 +08:00
|
|
|
LValue elementLValue;
|
|
|
|
QualType elementType;
|
|
|
|
if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
|
2011-02-22 15:16:58 +08:00
|
|
|
// Initialize the variable, in case it's a __block variable or something.
|
|
|
|
EmitAutoVarInit(variable);
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2011-02-22 15:16:58 +08:00
|
|
|
const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
|
2012-03-10 17:33:50 +08:00
|
|
|
DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
|
2011-01-07 09:49:06 +08:00
|
|
|
VK_LValue, SourceLocation());
|
|
|
|
elementLValue = EmitLValue(&tempDRE);
|
|
|
|
elementType = D->getType();
|
2011-02-22 15:16:58 +08:00
|
|
|
elementIsVariable = true;
|
2011-06-17 14:42:21 +08:00
|
|
|
|
|
|
|
if (D->isARCPseudoStrong())
|
|
|
|
elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
|
2011-01-07 09:49:06 +08:00
|
|
|
} else {
|
|
|
|
elementLValue = LValue(); // suppress warning
|
|
|
|
elementType = cast<Expr>(S.getElement())->getType();
|
2011-02-22 15:16:58 +08:00
|
|
|
elementIsVariable = false;
|
2011-01-07 09:49:06 +08:00
|
|
|
}
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *convertedElementType = ConvertType(elementType);
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Fetch the buffer out of the enumeration state.
|
|
|
|
// TODO: this pointer should actually be invariant between
|
|
|
|
// refreshes, which would help us do certain loop optimizations.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address StateItemsPtr = Builder.CreateStructGEP(
|
|
|
|
StatePtr, 1, getPointerSize(), "stateitems.ptr");
|
2011-01-07 09:49:06 +08:00
|
|
|
llvm::Value *EnumStateItems =
|
|
|
|
Builder.CreateLoad(StateItemsPtr, "stateitems");
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Fetch the value at the current index from the buffer.
|
2009-09-09 23:08:12 +08:00
|
|
|
llvm::Value *CurrentItemPtr =
|
2011-01-07 09:49:06 +08:00
|
|
|
Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *CurrentItem =
|
|
|
|
Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Cast that value to the right type.
|
|
|
|
CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
|
|
|
|
"currentitem");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Make sure we have an l-value. Yes, this gets evaluated every
|
|
|
|
// time through the loop.
|
2011-06-17 14:42:21 +08:00
|
|
|
if (!elementIsVariable) {
|
2011-01-07 09:49:06 +08:00
|
|
|
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
|
2011-06-25 10:11:03 +08:00
|
|
|
EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
|
2011-06-17 14:42:21 +08:00
|
|
|
} else {
|
2016-10-19 03:05:41 +08:00
|
|
|
EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue,
|
|
|
|
/*isInit*/ true);
|
2011-06-17 14:42:21 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-02-22 15:16:58 +08:00
|
|
|
// If we do have an element variable, this assignment is the end of
|
|
|
|
// its initialization.
|
|
|
|
if (elementIsVariable)
|
|
|
|
EmitAutoVarCleanups(variable);
|
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Perform the loop body, setting up break and continue labels.
|
Change PGO instrumentation to compute counts in a separate AST traversal.
Previously, we made one traversal of the AST prior to codegen to assign
counters to the ASTs and then propagated the count values during codegen. This
patch now adds a separate AST traversal prior to codegen for the
-fprofile-instr-use option to propagate the count values. The counts are then
saved in a map from which they can be retrieved during codegen.
This new approach has several advantages:
1. It gets rid of a lot of extra PGO-related code that had previously been
added to codegen.
2. It fixes a serious bug. My original implementation (which was mailed to the
list but never committed) used 3 counters for every loop. Justin improved it to
move 2 of those counters into the less-frequently executed breaks and continues,
but that turned out to produce wrong count values in some cases. The solution
requires visiting a loop body before the condition so that the count for the
condition properly includes the break and continue counts. Changing codegen to
visit a loop body first would be a fairly invasive change, but with a separate
AST traversal, it is easy to control the order of traversal. I've added a
testcase (provided by Justin) to make sure this works correctly.
3. It improves the instrumentation overhead, reducing the number of counters for
a loop from 3 to 1. We no longer need dedicated counters for breaks and
continues, since we can just use the propagated count values when visiting
breaks and continues.
To make this work, I needed to make a change to the way we count case
statements, going back to my original approach of not including the fall-through
in the counter values. This was necessary because there isn't always an AST node
that can be used to record the fall-through count. Now case statements are
handled the same as default statements, with the fall-through paths branching
over the counter increments. While I was at it, I also went back to using this
approach for do-loops -- omitting the fall-through count into the loop body
simplifies some of the calculations and make them behave the same as other
loops. Whenever we start using this instrumentation for coverage, we'll need
to add the fall-through counts into the counter values.
llvm-svn: 201528
2014-02-18 03:21:09 +08:00
|
|
|
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
|
2011-01-07 09:49:06 +08:00
|
|
|
{
|
|
|
|
RunCleanupsScope Scope(*this);
|
|
|
|
EmitStmt(S.getBody());
|
|
|
|
}
|
2008-08-31 10:33:12 +08:00
|
|
|
BreakContinueStack.pop_back();
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Destroy the element variable now.
|
|
|
|
elementVariableScope.ForceCleanup();
|
|
|
|
|
|
|
|
// Check whether there are more elements.
|
2010-07-24 05:56:41 +08:00
|
|
|
EmitBlock(AfterBody.getBlock());
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
|
|
|
|
|
|
|
|
// First we check in the local buffer.
|
2017-09-09 07:41:17 +08:00
|
|
|
llvm::Value *indexPlusOne =
|
|
|
|
Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));
|
2011-01-07 09:49:06 +08:00
|
|
|
|
|
|
|
// If we haven't overrun the buffer yet, we can continue.
|
2014-03-26 07:26:31 +08:00
|
|
|
// Set the branch weights based on the simplifying assumption that this is
|
|
|
|
// like a while-loop, i.e., ignoring that the false branch fetches more
|
|
|
|
// elements and then returns to the loop.
|
2015-04-24 07:06:47 +08:00
|
|
|
Builder.CreateCondBr(
|
|
|
|
Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB,
|
2015-05-02 13:00:55 +08:00
|
|
|
createProfileWeights(getProfileCount(S.getBody()), EntryCount));
|
2009-01-07 02:56:31 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
index->addIncoming(indexPlusOne, AfterBody.getBlock());
|
|
|
|
count->addIncoming(count, AfterBody.getBlock());
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// Otherwise, we have to fetch more elements.
|
|
|
|
EmitBlock(FetchMoreBB);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
|
|
|
CountRV =
|
2017-09-09 07:41:17 +08:00
|
|
|
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
|
|
|
|
getContext().getNSUIntegerType(),
|
|
|
|
FastEnumSel, Collection, Args);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
// If we got a zero count, we're done.
|
|
|
|
llvm::Value *refetchCount = CountRV.getScalarVal();
|
|
|
|
|
|
|
|
// (note that the message send might split FetchMoreBB)
|
|
|
|
index->addIncoming(zero, Builder.GetInsertBlock());
|
|
|
|
count->addIncoming(refetchCount, Builder.GetInsertBlock());
|
|
|
|
|
|
|
|
Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
|
|
|
|
EmptyBB, LoopBodyBB);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-08-31 10:33:12 +08:00
|
|
|
// No more elements.
|
2011-01-07 09:49:06 +08:00
|
|
|
EmitBlock(EmptyBB);
|
2008-08-31 10:33:12 +08:00
|
|
|
|
2011-02-22 15:16:58 +08:00
|
|
|
if (!elementIsVariable) {
|
2008-08-31 10:33:12 +08:00
|
|
|
// If the element was not a declaration, set it to be null.
|
|
|
|
|
2011-01-07 09:49:06 +08:00
|
|
|
llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
|
|
|
|
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
|
2011-06-25 10:11:03 +08:00
|
|
|
EmitStoreThroughLValue(RValue::get(null), elementLValue);
|
2008-08-31 10:33:12 +08:00
|
|
|
}
|
|
|
|
|
2011-10-14 05:45:18 +08:00
|
|
|
if (DI)
|
|
|
|
DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
|
2011-01-19 09:36:36 +08:00
|
|
|
|
2016-04-13 07:10:58 +08:00
|
|
|
ForScope.ForceCleanup();
|
2010-07-24 05:56:41 +08:00
|
|
|
EmitBlock(LoopEnd.getBlock());
|
2008-08-31 03:51:14 +08:00
|
|
|
}
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
|
2010-07-06 09:34:17 +08:00
|
|
|
CGM.getObjCRuntime().EmitTryStmt(*this, S);
|
2008-09-09 18:04:29 +08:00
|
|
|
}
|
|
|
|
|
2009-09-09 23:08:12 +08:00
|
|
|
void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
|
2008-09-09 18:04:29 +08:00
|
|
|
CGM.getObjCRuntime().EmitThrowStmt(*this, S);
|
|
|
|
}
|
|
|
|
|
2008-11-16 05:26:17 +08:00
|
|
|
void CodeGenFunction::EmitObjCAtSynchronizedStmt(
|
2009-09-09 23:08:12 +08:00
|
|
|
const ObjCAtSynchronizedStmt &S) {
|
2010-07-06 09:34:17 +08:00
|
|
|
CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
|
2008-11-16 05:26:17 +08:00
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
namespace {
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallObjCRelease final : EHScopeStack::Cleanup {
|
2011-08-04 06:24:24 +08:00
|
|
|
CallObjCRelease(llvm::Value *object) : object(object) {}
|
|
|
|
llvm::Value *object;
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2013-03-13 11:10:54 +08:00
|
|
|
// Releases at the end of the full-expression are imprecise.
|
|
|
|
CGF.EmitARCRelease(object, ARCImpreciseLifetime);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
};
|
2015-06-23 07:07:51 +08:00
|
|
|
}
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2011-09-10 14:18:15 +08:00
|
|
|
/// Produce the code for a CK_ARCConsumeObject. Does a primitive
|
2011-06-16 07:02:42 +08:00
|
|
|
/// release at the end of the full-expression.
|
|
|
|
llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
|
|
|
|
llvm::Value *object) {
|
|
|
|
// If we're in a conditional branch, we need to make the cleanup
|
2011-08-04 06:24:24 +08:00
|
|
|
// conditional.
|
|
|
|
pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
|
2011-06-16 07:02:42 +08:00
|
|
|
return object;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Extend the lifetime of the given object by emitting a retain
/// followed by an autorelease (see EmitARCRetainAutorelease).
llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
                                                           llvm::Value *value) {
  return EmitARCRetainAutorelease(type, value);
}
|
|
|
|
|
2013-03-23 10:35:54 +08:00
|
|
|
/// Given a number of pointers, inform the optimizer that they're
|
|
|
|
/// being intrinsically used up until this point in the program.
|
|
|
|
/// Given a number of pointers, inform the optimizer that they're
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
  // Lazily create the (varargs, void-returning) clang.arc.use marker.
  llvm::Constant *&useFn = CGM.getObjCEntrypoints().clang_arc_use;
  if (!useFn) {
    llvm::FunctionType *useFnTy =
        llvm::FunctionType::get(CGM.VoidTy, None, true);
    useFn = CGM.CreateRuntimeFunction(useFnTy, "clang.arc.use");
  }

  // This isn't really a "runtime" function, but as an intrinsic it
  // doesn't really matter as long as we align things up.
  EmitNounwindRuntimeCall(useFn, values);
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
/// Create (or look up) a reference to an ARC runtime support function,
/// adjusting its linkage/attributes for the target's ARC support level.
static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
                                                llvm::FunctionType *FTy,
                                                StringRef Name) {
  llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, Name);

  auto *FnDecl = dyn_cast<llvm::Function>(Fn);
  if (FnDecl) {
    // If the target runtime doesn't naturally support ARC, emit weak
    // references to the runtime support library. We don't really
    // permit this to fail, but we need a particular relocation style.
    bool needsWeakRef = !CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
                        !CGM.getTriple().isOSBinFormatCOFF();
    if (needsWeakRef) {
      FnDecl->setLinkage(llvm::Function::ExternalWeakLinkage);
    } else if (Name == "objc_retain" || Name == "objc_release") {
      // If we have Native ARC, set nonlazybind attribute for these APIs for
      // performance.
      FnDecl->addFnAttr(llvm::Attribute::NonLazyBind);
    }
  }

  return Fn;
}
|
|
|
|
|
|
|
|
/// Perform an operation having the signature
|
|
|
|
/// i8* (i8*)
|
|
|
|
/// where a null input causes a no-op and returns null.
|
|
|
|
/// Perform an operation having the signature
///   i8* (i8*)
/// where a null input causes a no-op and returns null.
static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
                                          llvm::Value *value,
                                          llvm::Constant *&fn,
                                          StringRef fnName,
                                          bool isTailCall = false) {
  // A literal null pointer never needs the runtime call.
  if (isa<llvm::ConstantPointerNull>(value))
    return value;

  // Lazily create the i8* (i8*) runtime function.
  if (!fn) {
    llvm::FunctionType *calleeTy =
        llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
    fn = createARCRuntimeFunction(CGF.CGM, calleeTy, fnName);
  }

  // Remember the incoming type, then cast the argument to 'id'.
  llvm::Type *resultTy = value->getType();
  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);

  // Call the function, optionally marking the call as a tail call.
  llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
  if (isTailCall)
    call->setTailCall();

  // Cast the result back to the original type.
  return CGF.Builder.CreateBitCast(call, resultTy);
}
|
|
|
|
|
|
|
|
/// Perform an operation having the following signature:
|
|
|
|
/// i8* (i8**)
|
|
|
|
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address addr,
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Constant *&fn,
|
2011-07-23 18:55:15 +08:00
|
|
|
StringRef fnName) {
|
2011-06-16 07:02:42 +08:00
|
|
|
if (!fn) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *fnType =
|
2013-03-08 05:18:31 +08:00
|
|
|
llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrPtrTy, false);
|
2011-06-16 07:02:42 +08:00
|
|
|
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cast the argument to 'id*'.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Type *origType = addr.getElementType();
|
2011-06-16 07:02:42 +08:00
|
|
|
addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
|
|
|
|
|
|
|
|
// Call the function.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
// Cast the result back to a dereference of the original type.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
if (origType != CGF.Int8PtrTy)
|
|
|
|
result = CGF.Builder.CreateBitCast(result, origType);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Perform an operation having the following signature:
|
|
|
|
/// i8* (i8**, i8*)
|
|
|
|
static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address addr,
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *value,
|
|
|
|
llvm::Constant *&fn,
|
2011-07-23 18:55:15 +08:00
|
|
|
StringRef fnName,
|
2011-06-16 07:02:42 +08:00
|
|
|
bool ignored) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
assert(addr.getElementType() == value->getType());
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
if (!fn) {
|
2011-10-15 20:20:02 +08:00
|
|
|
llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *fnType
|
2011-06-16 07:02:42 +08:00
|
|
|
= llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
|
|
|
|
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
|
|
|
|
}
|
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *origType = value->getType();
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2013-03-01 03:01:20 +08:00
|
|
|
llvm::Value *args[] = {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
|
2013-03-01 03:01:20 +08:00
|
|
|
CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
|
|
|
|
};
|
|
|
|
llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
if (ignored) return nullptr;
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
return CGF.Builder.CreateBitCast(result, origType);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Perform an operation having the following signature:
|
|
|
|
/// void (i8**, i8**)
|
|
|
|
static void emitARCCopyOperation(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address dst,
|
|
|
|
Address src,
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Constant *&fn,
|
2011-07-23 18:55:15 +08:00
|
|
|
StringRef fnName) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
assert(dst.getType() == src.getType());
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
if (!fn) {
|
2013-03-08 05:18:31 +08:00
|
|
|
llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy };
|
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *fnType
|
2011-06-16 07:02:42 +08:00
|
|
|
= llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
|
|
|
|
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
|
|
|
|
}
|
|
|
|
|
2013-03-01 03:01:20 +08:00
|
|
|
llvm::Value *args[] = {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
|
|
|
|
CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
|
2013-03-01 03:01:20 +08:00
|
|
|
};
|
|
|
|
CGF.EmitNounwindRuntimeCall(fn, args);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Produce the code to do a retain. Based on the type, calls one of:
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_retain(i8* %value)
|
|
|
|
/// call i8* \@objc_retainBlock(i8* %value)
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
|
|
|
|
if (type->isBlockPointerType())
|
2011-10-04 14:23:45 +08:00
|
|
|
return EmitARCRetainBlock(value, /*mandatory*/ false);
|
2011-06-16 07:02:42 +08:00
|
|
|
else
|
|
|
|
return EmitARCRetainNonBlock(value);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Retain the given object, with normal retain semantics.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_retain(i8* %value)
|
2016-03-22 04:50:03 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
|
|
|
|
return emitARCValueOperation(*this, value,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_retain,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_retain");
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Retain the given block, with _Block_copy semantics.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_retainBlock(i8* %value)
|
2011-10-04 14:23:45 +08:00
|
|
|
///
|
|
|
|
/// \param mandatory - If false, emit the call with metadata
|
|
|
|
/// indicating that it's okay for the optimizer to eliminate this call
|
|
|
|
/// if it can prove that the block never escapes except down the stack.
|
|
|
|
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
|
|
|
|
bool mandatory) {
|
|
|
|
llvm::Value *result
|
2016-03-22 04:50:03 +08:00
|
|
|
= emitARCValueOperation(*this, value,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_retainBlock,
|
2011-10-04 14:23:45 +08:00
|
|
|
"objc_retainBlock");
|
|
|
|
|
|
|
|
// If the copy isn't mandatory, add !clang.arc.copy_on_escape to
|
|
|
|
// tell the optimizer that it doesn't need to do this copy if the
|
|
|
|
// block doesn't escape, where being passed as an argument doesn't
|
|
|
|
// count as escaping.
|
|
|
|
if (!mandatory && isa<llvm::Instruction>(result)) {
|
|
|
|
llvm::CallInst *call
|
|
|
|
= cast<llvm::CallInst>(result->stripPointerCasts());
|
2015-10-22 02:06:43 +08:00
|
|
|
assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);
|
2011-10-04 14:23:45 +08:00
|
|
|
|
|
|
|
call->setMetadata("clang.arc.copy_on_escape",
|
2014-12-10 02:39:32 +08:00
|
|
|
llvm::MDNode::get(Builder.getContext(), None));
|
2011-10-04 14:23:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return result;
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
/// Emit the marker that tells the ARC middle-end an autoreleased
/// return value is about to be used: an inline asm call at -O0, or a
/// named-metadata breadcrumb for the ARC optimizer at -O1 and above.
static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
  // Fetch the cached void(void) inline asm; build it at most once.
  llvm::InlineAsm *&marker
    = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
  if (!marker) {
    StringRef assembly = CGF.CGM.getTargetCodeGenInfo()
                             .getARCRetainAutoreleasedReturnValueMarker();

    // An empty assembly string means the target needs no marker at all.
    if (!assembly.empty()) {
      if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
        // At -O0, build the inline asm that we're going to call below.
        llvm::FunctionType *markerTy =
            llvm::FunctionType::get(CGF.VoidTy, /*variadic*/ false);

        marker = llvm::InlineAsm::get(markerTy, assembly, "",
                                      /*sideeffects*/ true);
      } else {
        // At -O1 and above, don't litter the code with the marker;
        // record it in module metadata for the ARC optimizer instead.
        llvm::NamedMDNode *metadata =
            CGF.CGM.getModule().getOrInsertNamedMetadata(
                "clang.arc.retainAutoreleasedReturnValueMarker");
        assert(metadata->getNumOperands() <= 1);
        if (metadata->getNumOperands() == 0) {
          auto &ctx = CGF.getLLVMContext();
          metadata->addOperand(
              llvm::MDNode::get(ctx, llvm::MDString::get(ctx, assembly)));
        }
      }
    }
  }

  // Call the marker asm if we made one, which we do only at -O0.
  if (marker)
    CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
}
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
/// Retain the given object which is the result of a function call.
|
|
|
|
/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
|
|
|
|
///
|
|
|
|
/// Yes, this function name is one character away from a different
|
|
|
|
/// call with completely different semantics.
|
|
|
|
llvm::Value *
|
|
|
|
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
|
|
|
|
emitAutoreleasedReturnValueMarker(*this);
|
2016-03-22 04:50:03 +08:00
|
|
|
return emitARCValueOperation(*this, value,
|
2016-01-28 02:32:30 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_retainAutoreleasedReturnValue");
|
|
|
|
}
|
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
/// Claim a possibly-autoreleased return value at +0. This is only
|
|
|
|
/// valid to do in contexts which do not rely on the retain to keep
|
2018-01-26 16:15:52 +08:00
|
|
|
/// the object valid for all of its uses; for example, when
|
2016-01-28 02:32:30 +08:00
|
|
|
/// the value is ignored, or when it is being assigned to an
|
|
|
|
/// __unsafe_unretained variable.
|
|
|
|
///
|
|
|
|
/// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
|
|
|
|
llvm::Value *
|
|
|
|
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
|
|
|
|
emitAutoreleasedReturnValueMarker(*this);
|
2016-03-22 04:50:03 +08:00
|
|
|
return emitARCValueOperation(*this, value,
|
2016-01-28 02:32:30 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
|
|
|
|
"objc_unsafeClaimAutoreleasedReturnValue");
|
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Release the given object.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call void \@objc_release(i8* %value)
|
2013-03-13 11:10:54 +08:00
|
|
|
void CodeGenFunction::EmitARCRelease(llvm::Value *value,
|
|
|
|
ARCPreciseLifetime_t precise) {
|
2011-06-16 07:02:42 +08:00
|
|
|
if (isa<llvm::ConstantPointerNull>(value)) return;
|
|
|
|
|
2015-10-22 02:06:43 +08:00
|
|
|
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
|
2011-06-16 07:02:42 +08:00
|
|
|
if (!fn) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *fnType =
|
2013-03-08 05:18:31 +08:00
|
|
|
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
|
2011-06-16 07:02:42 +08:00
|
|
|
fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cast the argument to 'id'.
|
|
|
|
value = Builder.CreateBitCast(value, Int8PtrTy);
|
|
|
|
|
|
|
|
// Call objc_release.
|
2013-03-01 03:01:20 +08:00
|
|
|
llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2013-03-13 11:10:54 +08:00
|
|
|
if (precise == ARCImpreciseLifetime) {
|
2011-06-16 07:02:42 +08:00
|
|
|
call->setMetadata("clang.imprecise_release",
|
2014-12-10 02:39:32 +08:00
|
|
|
llvm::MDNode::get(Builder.getContext(), None));
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-17 10:28:37 +08:00
|
|
|
/// Destroy a __strong variable.
|
|
|
|
///
|
|
|
|
/// At -O0, emit a call to store 'null' into the address;
|
|
|
|
/// instrumenting tools prefer this because the address is exposed,
|
|
|
|
/// but it's relatively cumbersome to optimize.
|
|
|
|
///
|
|
|
|
/// At -O1 and above, just load and call objc_release.
|
|
|
|
///
|
|
|
|
/// call void \@objc_storeStrong(i8** %addr, i8* null)
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
void CodeGenFunction::EmitARCDestroyStrong(Address addr,
|
2013-03-13 11:10:54 +08:00
|
|
|
ARCPreciseLifetime_t precise) {
|
2012-10-17 10:28:37 +08:00
|
|
|
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *null = getNullForVariable(addr);
|
2012-10-17 10:28:37 +08:00
|
|
|
EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Value *value = Builder.CreateLoad(addr);
|
|
|
|
EmitARCRelease(value, precise);
|
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Store into a strong object. Always calls this:
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *value,
|
|
|
|
bool ignored) {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
assert(addr.getElementType() == value->getType());
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2015-10-22 02:06:43 +08:00
|
|
|
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
|
2011-06-16 07:02:42 +08:00
|
|
|
if (!fn) {
|
2011-07-10 01:41:47 +08:00
|
|
|
llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *fnType
|
2011-06-16 07:02:42 +08:00
|
|
|
= llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
|
|
|
|
fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
|
|
|
|
}
|
|
|
|
|
2013-03-01 03:01:20 +08:00
|
|
|
llvm::Value *args[] = {
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
|
2013-03-01 03:01:20 +08:00
|
|
|
Builder.CreateBitCast(value, Int8PtrTy)
|
|
|
|
};
|
|
|
|
EmitNounwindRuntimeCall(fn, args);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2014-05-21 13:09:00 +08:00
|
|
|
if (ignored) return nullptr;
|
2011-06-16 07:02:42 +08:00
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Store into a strong object. Sometimes calls this:
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Other times, breaks it down into components.
|
2011-06-25 10:11:03 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *newValue,
|
|
|
|
bool ignored) {
|
2011-06-25 10:11:03 +08:00
|
|
|
QualType type = dst.getType();
|
2011-06-16 07:02:42 +08:00
|
|
|
bool isBlock = type->isBlockPointerType();
|
|
|
|
|
|
|
|
// Use a store barrier at -O0 unless this is a block type or the
|
|
|
|
// lvalue is inadequately aligned.
|
|
|
|
if (shouldUseFusedARCCalls() &&
|
|
|
|
!isBlock &&
|
2011-12-03 12:14:32 +08:00
|
|
|
(dst.getAlignment().isZero() ||
|
|
|
|
dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
|
2011-06-16 07:02:42 +08:00
|
|
|
return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, split it out.
|
|
|
|
|
|
|
|
// Retain the new value.
|
|
|
|
newValue = EmitARCRetain(type, newValue);
|
|
|
|
|
|
|
|
// Read the old value.
|
2013-10-02 10:29:49 +08:00
|
|
|
llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
// Store. We do this before the release so that any deallocs won't
|
|
|
|
// see the old value.
|
2011-06-25 10:11:03 +08:00
|
|
|
EmitStoreOfScalar(newValue, dst);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
// Finally, release the old value.
|
2013-03-13 11:10:54 +08:00
|
|
|
EmitARCRelease(oldValue, dst.isARCPreciseLifetime());
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
return newValue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Autorelease the given object.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_autorelease(i8* %value)
|
2016-03-22 04:50:03 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
|
|
|
|
return emitARCValueOperation(*this, value,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_autorelease,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_autorelease");
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Autorelease the given object.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *
|
|
|
|
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
|
2016-03-22 04:50:03 +08:00
|
|
|
return emitARCValueOperation(*this, value,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
|
2012-12-13 01:52:21 +08:00
|
|
|
"objc_autoreleaseReturnValue",
|
|
|
|
/*isTailCall*/ true);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Do a fused retain/autorelease of the given object.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *
|
|
|
|
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
|
2016-03-22 04:50:03 +08:00
|
|
|
return emitARCValueOperation(*this, value,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
|
2012-12-13 01:52:21 +08:00
|
|
|
"objc_retainAutoreleaseReturnValue",
|
|
|
|
/*isTailCall*/ true);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Do a fused retain/autorelease of the given object.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_retainAutorelease(i8* %value)
|
2011-06-16 07:02:42 +08:00
|
|
|
/// or
|
2012-06-22 13:41:30 +08:00
|
|
|
/// %retain = call i8* \@objc_retainBlock(i8* %value)
|
|
|
|
/// call i8* \@objc_autorelease(i8* %retain)
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
|
|
|
|
llvm::Value *value) {
|
|
|
|
if (!type->isBlockPointerType())
|
|
|
|
return EmitARCRetainAutoreleaseNonBlock(value);
|
|
|
|
|
|
|
|
if (isa<llvm::ConstantPointerNull>(value)) return value;
|
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *origType = value->getType();
|
2011-06-16 07:02:42 +08:00
|
|
|
value = Builder.CreateBitCast(value, Int8PtrTy);
|
2011-10-04 14:23:45 +08:00
|
|
|
value = EmitARCRetainBlock(value, /*mandatory*/ true);
|
2011-06-16 07:02:42 +08:00
|
|
|
value = EmitARCAutorelease(value);
|
|
|
|
return Builder.CreateBitCast(value, origType);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Do a fused retain/autorelease of the given object.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_retainAutorelease(i8* %value)
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *
|
|
|
|
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
|
2016-03-22 04:50:03 +08:00
|
|
|
return emitARCValueOperation(*this, value,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_retainAutorelease,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_retainAutorelease");
|
|
|
|
}
|
|
|
|
|
2015-10-22 02:06:43 +08:00
|
|
|
/// i8* \@objc_loadWeak(i8** %addr)
|
|
|
|
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
|
|
|
|
llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
|
|
|
|
return emitARCLoadOperation(*this, addr,
|
|
|
|
CGM.getObjCEntrypoints().objc_loadWeak,
|
|
|
|
"objc_loadWeak");
|
|
|
|
}
|
|
|
|
|
2012-06-22 13:41:30 +08:00
|
|
|
/// i8* \@objc_loadWeakRetained(i8** %addr)
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
|
2011-06-16 07:02:42 +08:00
|
|
|
return emitARCLoadOperation(*this, addr,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_loadWeakRetained,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_loadWeakRetained");
|
|
|
|
}
|
|
|
|
|
2012-06-22 13:41:30 +08:00
|
|
|
/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Returns %value.
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *value,
|
|
|
|
bool ignored) {
|
|
|
|
return emitARCStoreOperation(*this, addr, value,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_storeWeak,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_storeWeak", ignored);
|
|
|
|
}
|
|
|
|
|
2012-06-22 13:41:30 +08:00
|
|
|
/// i8* \@objc_initWeak(i8** %addr, i8* %value)
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Returns %value. %addr is known to not have a current weak entry.
|
|
|
|
/// Essentially equivalent to:
|
|
|
|
/// *addr = nil; objc_storeWeak(addr, value);
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
|
2011-06-16 07:02:42 +08:00
|
|
|
// If we're initializing to null, just write null to memory; no need
|
|
|
|
// to get the runtime involved. But don't do this if optimization
|
|
|
|
// is enabled, because accounting for this would make the optimizer
|
|
|
|
// much more complicated.
|
|
|
|
if (isa<llvm::ConstantPointerNull>(value) &&
|
|
|
|
CGM.getCodeGenOpts().OptimizationLevel == 0) {
|
|
|
|
Builder.CreateStore(value, addr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
emitARCStoreOperation(*this, addr, value,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_initWeak,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_initWeak", /*ignored*/ true);
|
|
|
|
}
|
|
|
|
|
2012-06-22 13:41:30 +08:00
|
|
|
/// void \@objc_destroyWeak(i8** %addr)
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Essentially objc_storeWeak(addr, nil).
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
|
2015-10-22 02:06:43 +08:00
|
|
|
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
|
2011-06-16 07:02:42 +08:00
|
|
|
if (!fn) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *fnType =
|
2013-03-08 05:18:31 +08:00
|
|
|
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false);
|
2011-06-16 07:02:42 +08:00
|
|
|
fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cast the argument to 'id*'.
|
|
|
|
addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
|
|
|
|
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
EmitNounwindRuntimeCall(fn, addr.getPointer());
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
2012-06-22 13:41:30 +08:00
|
|
|
/// void \@objc_moveWeak(i8** %dest, i8** %src)
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
|
|
|
|
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
|
2011-06-16 07:02:42 +08:00
|
|
|
emitARCCopyOperation(*this, dst, src,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_moveWeak,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_moveWeak");
|
|
|
|
}
|
|
|
|
|
2012-06-22 13:41:30 +08:00
|
|
|
/// void \@objc_copyWeak(i8** %dest, i8** %src)
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Disregards the current value in %dest. Essentially
|
|
|
|
/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
|
2011-06-16 07:02:42 +08:00
|
|
|
emitARCCopyOperation(*this, dst, src,
|
2015-10-22 02:06:43 +08:00
|
|
|
CGM.getObjCEntrypoints().objc_copyWeak,
|
2011-06-16 07:02:42 +08:00
|
|
|
"objc_copyWeak");
|
|
|
|
}
|
|
|
|
|
2018-03-20 01:38:40 +08:00
|
|
|
/// Copy-assign one __weak variable from another: load the source with a
/// +1 retain, hand the reference to EmitObjCConsumeObject, and weak-store
/// the result into the destination.
void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Obj = EmitARCLoadWeakRetained(SrcAddr);
  Obj = EmitObjCConsumeObject(Ty, Obj);
  EmitARCStoreWeak(DstAddr, Obj, /*ignored*/ false);
}
|
|
|
|
|
|
|
|
/// Move-assign one __weak variable from another: same sequence as
/// emitARCCopyAssignWeak, followed by destroying the source's weak entry.
void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Obj = EmitARCLoadWeakRetained(SrcAddr);
  Obj = EmitObjCConsumeObject(Ty, Obj);
  EmitARCStoreWeak(DstAddr, Obj, /*ignored*/ false);
  // A move leaves the source weak reference cleared.
  EmitARCDestroyWeak(SrcAddr);
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Produce the code to do a objc_autoreleasepool_push.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call i8* \@objc_autoreleasePoolPush(void)
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
|
2015-10-22 02:06:43 +08:00
|
|
|
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
|
2011-06-16 07:02:42 +08:00
|
|
|
if (!fn) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *fnType =
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::FunctionType::get(Int8PtrTy, false);
|
|
|
|
fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
|
|
|
|
}
|
|
|
|
|
2013-03-01 03:01:20 +08:00
|
|
|
return EmitNounwindRuntimeCall(fn);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Produce the code to do a primitive release.
|
2012-06-22 13:41:30 +08:00
|
|
|
/// call void \@objc_autoreleasePoolPop(i8* %ptr)
|
2011-06-16 07:02:42 +08:00
|
|
|
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
|
|
|
|
assert(value->getType() == Int8PtrTy);
|
|
|
|
|
2015-10-22 02:06:43 +08:00
|
|
|
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
|
2011-06-16 07:02:42 +08:00
|
|
|
if (!fn) {
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *fnType =
|
2013-03-08 05:18:31 +08:00
|
|
|
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
// We don't want to use a weak import here; instead we should not
|
|
|
|
// fall into this path.
|
|
|
|
fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
|
|
|
|
}
|
|
|
|
|
2013-04-17 05:29:40 +08:00
|
|
|
// objc_autoreleasePoolPop can throw.
|
|
|
|
EmitRuntimeCallOrInvoke(fn, value);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Produce the code to do an MRR version objc_autoreleasepool_push.
|
|
|
|
/// Which is: [[NSAutoreleasePool alloc] init];
|
|
|
|
/// Where alloc is declared as: + (id) alloc; in NSAutoreleasePool class.
|
|
|
|
/// init is declared as: - (id) init; in its NSObject super class.
|
|
|
|
///
|
|
|
|
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
|
|
|
|
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
|
2013-03-01 03:01:20 +08:00
|
|
|
llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
|
2011-06-16 07:02:42 +08:00
|
|
|
// [NSAutoreleasePool alloc]
|
|
|
|
IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
|
|
|
|
Selector AllocSel = getContext().Selectors.getSelector(0, &II);
|
|
|
|
CallArgList Args;
|
2018-07-31 03:24:48 +08:00
|
|
|
RValue AllocRV =
|
|
|
|
Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
|
2011-06-16 07:02:42 +08:00
|
|
|
getContext().getObjCIdType(),
|
2018-07-31 03:24:48 +08:00
|
|
|
AllocSel, Receiver, Args);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
// [Receiver init]
|
|
|
|
Receiver = AllocRV.getScalarVal();
|
|
|
|
II = &CGM.getContext().Idents.get("init");
|
|
|
|
Selector InitSel = getContext().Selectors.getSelector(0, &II);
|
|
|
|
RValue InitRV =
|
|
|
|
Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
|
|
|
|
getContext().getObjCIdType(),
|
2018-07-31 03:24:48 +08:00
|
|
|
InitSel, Receiver, Args);
|
2011-06-16 07:02:42 +08:00
|
|
|
return InitRV.getScalarVal();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Produce the code to do a primitive release.
|
|
|
|
/// [tmp drain];
|
|
|
|
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
|
|
|
|
IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
|
|
|
|
Selector DrainSel = getContext().Selectors.getSelector(0, &II);
|
|
|
|
CallArgList Args;
|
|
|
|
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
|
2018-07-31 03:24:48 +08:00
|
|
|
getContext().VoidTy, DrainSel, Arg, Args);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
2011-07-09 09:37:26 +08:00
|
|
|
/// Destroy a __strong reference stored at \p addr, using precise lifetime
/// semantics (ARCPreciseLifetime).
/// NOTE(review): 'type' is unused in the body; presumably kept so the
/// signature matches a destroyer-callback shape — confirm against callers.
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
                                              Address addr,
                                              QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}
|
|
|
|
|
|
|
|
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address addr,
|
2011-07-09 09:37:26 +08:00
|
|
|
QualType type) {
|
2013-03-13 11:10:54 +08:00
|
|
|
CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
|
2011-07-09 09:37:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
Address addr,
|
2011-07-09 09:37:26 +08:00
|
|
|
QualType type) {
|
|
|
|
CGF.EmitARCDestroyWeak(addr);
|
|
|
|
}
|
|
|
|
|
2017-04-29 02:50:57 +08:00
|
|
|
void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
|
|
|
|
QualType type) {
|
|
|
|
llvm::Value *value = CGF.Builder.CreateLoad(addr);
|
|
|
|
CGF.EmitARCIntrinsicUse(value);
|
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
namespace {
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *Token;
|
|
|
|
|
|
|
|
CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2011-06-16 07:02:42 +08:00
|
|
|
CGF.EmitObjCAutoreleasePoolPop(Token);
|
|
|
|
}
|
|
|
|
};
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *Token;
|
|
|
|
|
|
|
|
CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2011-06-16 07:02:42 +08:00
|
|
|
CGF.EmitObjCMRRAutoreleasePoolPop(Token);
|
|
|
|
}
|
|
|
|
};
|
2015-06-23 07:07:51 +08:00
|
|
|
}
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
|
2012-03-11 15:00:24 +08:00
|
|
|
if (CGM.getLangOpts().ObjCAutoRefCount)
|
2011-06-16 07:02:42 +08:00
|
|
|
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
|
|
|
|
else
|
|
|
|
EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
|
|
|
|
LValue lvalue,
|
|
|
|
QualType type) {
|
|
|
|
switch (type.getObjCLifetime()) {
|
|
|
|
case Qualifiers::OCL_None:
|
|
|
|
case Qualifiers::OCL_ExplicitNone:
|
|
|
|
case Qualifiers::OCL_Strong:
|
|
|
|
case Qualifiers::OCL_Autoreleasing:
|
2013-10-02 10:29:49 +08:00
|
|
|
return TryEmitResult(CGF.EmitLoadOfLValue(lvalue,
|
|
|
|
SourceLocation()).getScalarVal(),
|
2011-06-16 07:02:42 +08:00
|
|
|
false);
|
|
|
|
|
|
|
|
case Qualifiers::OCL_Weak:
|
|
|
|
return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
|
|
|
|
true);
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm_unreachable("impossible lifetime!");
|
|
|
|
}
|
|
|
|
|
|
|
|
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
|
|
|
|
const Expr *e) {
|
|
|
|
e = e->IgnoreParens();
|
|
|
|
QualType type = e->getType();
|
|
|
|
|
2018-07-31 03:24:48 +08:00
|
|
|
// If we're loading retained from a __strong xvalue, we can avoid
|
2011-08-30 08:57:29 +08:00
|
|
|
// an extra retain/release pair by zeroing out the source of this
|
|
|
|
// "move" operation.
|
|
|
|
if (e->isXValue() &&
|
|
|
|
!type.isConstQualified() &&
|
|
|
|
type.getObjCLifetime() == Qualifiers::OCL_Strong) {
|
|
|
|
// Emit the lvalue.
|
|
|
|
LValue lv = CGF.EmitLValue(e);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2011-08-30 08:57:29 +08:00
|
|
|
// Load the object pointer.
|
2013-10-02 10:29:49 +08:00
|
|
|
llvm::Value *result = CGF.EmitLoadOfLValue(lv,
|
|
|
|
SourceLocation()).getScalarVal();
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2011-08-30 08:57:29 +08:00
|
|
|
// Set the source pointer to NULL.
|
|
|
|
CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
|
2018-07-31 03:24:48 +08:00
|
|
|
|
2011-08-30 08:57:29 +08:00
|
|
|
return TryEmitResult(result, true);
|
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
// As a very special optimization, in ARC++, if the l-value is the
|
|
|
|
// result of a non-volatile assignment, do a simple retain of the
|
|
|
|
// result of the call to objc_storeWeak instead of reloading.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (CGF.getLangOpts().CPlusPlus &&
|
2011-06-16 07:02:42 +08:00
|
|
|
!type.isVolatileQualified() &&
|
|
|
|
type.getObjCLifetime() == Qualifiers::OCL_Weak &&
|
|
|
|
isa<BinaryOperator>(e) &&
|
|
|
|
cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
|
|
|
|
return TryEmitResult(CGF.EmitScalarExpr(e), false);
|
|
|
|
|
|
|
|
return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
|
|
|
|
}
|
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
|
|
|
|
llvm::Value *value)>
|
|
|
|
ValueTransform;
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
/// Insert code immediately after a call.
|
|
|
|
static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
|
|
|
|
llvm::Value *value,
|
|
|
|
ValueTransform doAfterCall,
|
|
|
|
ValueTransform doFallback) {
|
2011-06-16 07:02:42 +08:00
|
|
|
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
|
|
|
|
CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
|
|
|
|
|
|
|
|
// Place the retain immediately following the call.
|
|
|
|
CGF.Builder.SetInsertPoint(call->getParent(),
|
|
|
|
++llvm::BasicBlock::iterator(call));
|
2016-01-28 02:32:30 +08:00
|
|
|
value = doAfterCall(CGF, value);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
CGF.Builder.restoreIP(ip);
|
|
|
|
return value;
|
|
|
|
} else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
|
|
|
|
CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
|
|
|
|
|
|
|
|
// Place the retain at the beginning of the normal destination block.
|
|
|
|
llvm::BasicBlock *BB = invoke->getNormalDest();
|
|
|
|
CGF.Builder.SetInsertPoint(BB, BB->begin());
|
2016-01-28 02:32:30 +08:00
|
|
|
value = doAfterCall(CGF, value);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
CGF.Builder.restoreIP(ip);
|
|
|
|
return value;
|
|
|
|
|
|
|
|
// Bitcasts can arise because of related-result returns. Rewrite
|
|
|
|
// the operand.
|
|
|
|
} else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
|
|
|
|
llvm::Value *operand = bitcast->getOperand(0);
|
2016-01-28 02:32:30 +08:00
|
|
|
operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
|
2011-06-16 07:02:42 +08:00
|
|
|
bitcast->setOperand(0, operand);
|
|
|
|
return bitcast;
|
|
|
|
|
|
|
|
// Generic fall-back case.
|
|
|
|
} else {
|
|
|
|
// Retain using the non-block variant: we never need to do a copy
|
|
|
|
// of a block that's been returned to us.
|
2016-01-28 02:32:30 +08:00
|
|
|
return doFallback(CGF, value);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Given that the given expression is some sort of call (which does
|
|
|
|
/// not return retained), emit a retain following it.
|
|
|
|
static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
|
|
|
|
const Expr *e) {
|
|
|
|
llvm::Value *value = CGF.EmitScalarExpr(e);
|
|
|
|
return emitARCOperationAfterCall(CGF, value,
|
|
|
|
[](CodeGenFunction &CGF, llvm::Value *value) {
|
|
|
|
return CGF.EmitARCRetainAutoreleasedReturnValue(value);
|
|
|
|
},
|
|
|
|
[](CodeGenFunction &CGF, llvm::Value *value) {
|
|
|
|
return CGF.EmitARCRetainNonBlock(value);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Given that the given expression is some sort of call (which does
|
|
|
|
/// not return retained), perform an unsafeClaim following it.
|
|
|
|
static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
|
|
|
|
const Expr *e) {
|
|
|
|
llvm::Value *value = CGF.EmitScalarExpr(e);
|
|
|
|
return emitARCOperationAfterCall(CGF, value,
|
|
|
|
[](CodeGenFunction &CGF, llvm::Value *value) {
|
|
|
|
return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
|
|
|
|
},
|
|
|
|
[](CodeGenFunction &CGF, llvm::Value *value) {
|
|
|
|
return value;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
|
|
|
|
bool allowUnsafeClaim) {
|
|
|
|
if (allowUnsafeClaim &&
|
|
|
|
CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
|
|
|
|
return emitARCUnsafeClaimCallResult(*this, E);
|
|
|
|
} else {
|
|
|
|
llvm::Value *value = emitARCRetainCallResult(*this, E);
|
|
|
|
return EmitObjCConsumeObject(E->getType(), value);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-09-10 09:16:55 +08:00
|
|
|
/// Determine whether it might be important to emit a separate
|
|
|
|
/// objc_retain_block on the result of the given expression, or
|
|
|
|
/// whether it's okay to just emit it in a +1 context.
|
|
|
|
static bool shouldEmitSeparateBlockRetain(const Expr *e) {
|
|
|
|
assert(e->getType()->isBlockPointerType());
|
|
|
|
e = e->IgnoreParens();
|
|
|
|
|
|
|
|
// For future goodness, emit block expressions directly in +1
|
|
|
|
// contexts if we can.
|
|
|
|
if (isa<BlockExpr>(e))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
|
|
|
|
switch (cast->getCastKind()) {
|
|
|
|
// Emitting these operations in +1 contexts is goodness.
|
|
|
|
case CK_LValueToRValue:
|
2011-09-10 14:18:15 +08:00
|
|
|
case CK_ARCReclaimReturnedObject:
|
|
|
|
case CK_ARCConsumeObject:
|
|
|
|
case CK_ARCProduceObject:
|
2011-09-10 09:16:55 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// These operations preserve a block type.
|
|
|
|
case CK_NoOp:
|
|
|
|
case CK_BitCast:
|
|
|
|
return shouldEmitSeparateBlockRetain(cast->getSubExpr());
|
|
|
|
|
|
|
|
// These operations are known to be bad (or haven't been considered).
|
|
|
|
case CK_AnyPointerToBlockPointerCast:
|
|
|
|
default:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
namespace {
|
|
|
|
/// A CRTP base class for emitting expressions of retainable object
|
|
|
|
/// pointer type in ARC.
|
|
|
|
template <typename Impl, typename Result> class ARCExprEmitter {
|
|
|
|
protected:
|
|
|
|
CodeGenFunction &CGF;
|
|
|
|
Impl &asImpl() { return *static_cast<Impl*>(this); }
|
|
|
|
|
|
|
|
ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}
|
|
|
|
|
|
|
|
public:
|
|
|
|
Result visit(const Expr *e);
|
|
|
|
Result visitCastExpr(const CastExpr *e);
|
|
|
|
Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
|
|
|
|
Result visitBinaryOperator(const BinaryOperator *e);
|
|
|
|
Result visitBinAssign(const BinaryOperator *e);
|
|
|
|
Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
|
|
|
|
Result visitBinAssignAutoreleasing(const BinaryOperator *e);
|
|
|
|
Result visitBinAssignWeak(const BinaryOperator *e);
|
|
|
|
Result visitBinAssignStrong(const BinaryOperator *e);
|
|
|
|
|
|
|
|
// Minimal implementation:
|
|
|
|
// Result visitLValueToRValue(const Expr *e)
|
|
|
|
// Result visitConsumeObject(const Expr *e)
|
|
|
|
// Result visitExtendBlockObject(const Expr *e)
|
|
|
|
// Result visitReclaimReturnedObject(const Expr *e)
|
|
|
|
// Result visitCall(const Expr *e)
|
|
|
|
// Result visitExpr(const Expr *e)
|
|
|
|
//
|
|
|
|
// Result emitBitCast(Result result, llvm::Type *resultType)
|
|
|
|
// llvm::Value *getValueOfResult(Result result)
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Try to emit a PseudoObjectExpr under special ARC rules.
|
2011-11-06 17:01:30 +08:00
|
|
|
///
|
|
|
|
/// This massively duplicates emitPseudoObjectRValue.
|
2016-01-28 02:32:30 +08:00
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result
|
|
|
|
ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
|
2013-01-13 03:30:44 +08:00
|
|
|
SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
|
2011-11-06 17:01:30 +08:00
|
|
|
|
|
|
|
// Find the result expression.
|
|
|
|
const Expr *resultExpr = E->getResultExpr();
|
|
|
|
assert(resultExpr);
|
2016-01-28 02:32:30 +08:00
|
|
|
Result result;
|
2011-11-06 17:01:30 +08:00
|
|
|
|
|
|
|
for (PseudoObjectExpr::const_semantics_iterator
|
|
|
|
i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
|
|
|
|
const Expr *semantic = *i;
|
|
|
|
|
|
|
|
// If this semantic expression is an opaque value, bind it
|
|
|
|
// to the result of its source expression.
|
|
|
|
if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
|
|
|
|
typedef CodeGenFunction::OpaqueValueMappingData OVMA;
|
|
|
|
OVMA opaqueData;
|
|
|
|
|
|
|
|
// If this semantic is the result of the pseudo-object
|
|
|
|
// expression, try to evaluate the source as +1.
|
|
|
|
if (ov == resultExpr) {
|
|
|
|
assert(!OVMA::shouldBindAsLValue(ov));
|
2016-01-28 02:32:30 +08:00
|
|
|
result = asImpl().visit(ov->getSourceExpr());
|
|
|
|
opaqueData = OVMA::bind(CGF, ov,
|
|
|
|
RValue::get(asImpl().getValueOfResult(result)));
|
2011-11-06 17:01:30 +08:00
|
|
|
|
|
|
|
// Otherwise, just bind it.
|
|
|
|
} else {
|
|
|
|
opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
|
|
|
|
}
|
|
|
|
opaques.push_back(opaqueData);
|
|
|
|
|
|
|
|
// Otherwise, if the expression is the result, evaluate it
|
|
|
|
// and remember the result.
|
|
|
|
} else if (semantic == resultExpr) {
|
2016-01-28 02:32:30 +08:00
|
|
|
result = asImpl().visit(semantic);
|
2011-11-06 17:01:30 +08:00
|
|
|
|
|
|
|
// Otherwise, evaluate the expression in an ignored context.
|
|
|
|
} else {
|
|
|
|
CGF.EmitIgnoredExpr(semantic);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unbind all the opaques now.
|
|
|
|
for (unsigned i = 0, e = opaques.size(); i != e; ++i)
|
|
|
|
opaques[i].unbind(CGF);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
|
|
|
|
switch (e->getCastKind()) {
|
|
|
|
|
|
|
|
// No-op casts don't change the type, so we just ignore them.
|
|
|
|
case CK_NoOp:
|
|
|
|
return asImpl().visit(e->getSubExpr());
|
|
|
|
|
|
|
|
// These casts can change the type.
|
|
|
|
case CK_CPointerToObjCPointerCast:
|
|
|
|
case CK_BlockPointerToObjCPointerCast:
|
|
|
|
case CK_AnyPointerToBlockPointerCast:
|
|
|
|
case CK_BitCast: {
|
|
|
|
llvm::Type *resultType = CGF.ConvertType(e->getType());
|
|
|
|
assert(e->getSubExpr()->getType()->hasPointerRepresentation());
|
|
|
|
Result result = asImpl().visit(e->getSubExpr());
|
|
|
|
return asImpl().emitBitCast(result, resultType);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Handle some casts specially.
|
|
|
|
case CK_LValueToRValue:
|
|
|
|
return asImpl().visitLValueToRValue(e->getSubExpr());
|
|
|
|
case CK_ARCConsumeObject:
|
|
|
|
return asImpl().visitConsumeObject(e->getSubExpr());
|
|
|
|
case CK_ARCExtendBlockObject:
|
|
|
|
return asImpl().visitExtendBlockObject(e->getSubExpr());
|
|
|
|
case CK_ARCReclaimReturnedObject:
|
|
|
|
return asImpl().visitReclaimReturnedObject(e->getSubExpr());
|
|
|
|
|
|
|
|
// Otherwise, use the default logic.
|
|
|
|
default:
|
|
|
|
return asImpl().visitExpr(e);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result
|
|
|
|
ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
|
|
|
|
switch (e->getOpcode()) {
|
|
|
|
case BO_Comma:
|
|
|
|
CGF.EmitIgnoredExpr(e->getLHS());
|
|
|
|
CGF.EnsureInsertPoint();
|
|
|
|
return asImpl().visit(e->getRHS());
|
|
|
|
|
|
|
|
case BO_Assign:
|
|
|
|
return asImpl().visitBinAssign(e);
|
|
|
|
|
|
|
|
default:
|
|
|
|
return asImpl().visitExpr(e);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
|
|
|
|
switch (e->getLHS()->getType().getObjCLifetime()) {
|
|
|
|
case Qualifiers::OCL_ExplicitNone:
|
|
|
|
return asImpl().visitBinAssignUnsafeUnretained(e);
|
|
|
|
|
|
|
|
case Qualifiers::OCL_Weak:
|
|
|
|
return asImpl().visitBinAssignWeak(e);
|
|
|
|
|
|
|
|
case Qualifiers::OCL_Autoreleasing:
|
|
|
|
return asImpl().visitBinAssignAutoreleasing(e);
|
|
|
|
|
|
|
|
case Qualifiers::OCL_Strong:
|
|
|
|
return asImpl().visitBinAssignStrong(e);
|
|
|
|
|
|
|
|
case Qualifiers::OCL_None:
|
|
|
|
return asImpl().visitExpr(e);
|
|
|
|
}
|
|
|
|
llvm_unreachable("bad ObjC ownership qualifier");
|
|
|
|
}
|
|
|
|
|
|
|
|
/// The default rule for __unsafe_unretained emits the RHS recursively,
|
|
|
|
/// stores into the unsafe variable, and propagates the result outward.
|
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result ARCExprEmitter<Impl,Result>::
|
|
|
|
visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
|
|
|
|
// Recursively emit the RHS.
|
|
|
|
// For __block safety, do this before emitting the LHS.
|
|
|
|
Result result = asImpl().visit(e->getRHS());
|
|
|
|
|
|
|
|
// Perform the store.
|
|
|
|
LValue lvalue =
|
|
|
|
CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
|
|
|
|
CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)),
|
|
|
|
lvalue);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result
|
|
|
|
ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
|
|
|
|
return asImpl().visitExpr(e);
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result
|
|
|
|
ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
|
|
|
|
return asImpl().visitExpr(e);
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result
|
|
|
|
ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
|
|
|
|
return asImpl().visitExpr(e);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// The general expression-emission logic.
|
|
|
|
template <typename Impl, typename Result>
|
|
|
|
Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
|
2013-02-12 08:25:08 +08:00
|
|
|
// We should *never* see a nested full-expression here, because if
|
|
|
|
// we fail to emit at +1, our caller must not retain after we close
|
2016-01-28 02:32:30 +08:00
|
|
|
// out the full-expression. This isn't as important in the unsafe
|
|
|
|
// emitter.
|
2013-02-12 08:25:08 +08:00
|
|
|
assert(!isa<ExprWithCleanups>(e));
|
2011-07-27 09:07:15 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
// Look through parens, __extension__, generic selection, etc.
|
|
|
|
e = e->IgnoreParens();
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
// Handle certain kinds of casts.
|
|
|
|
if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
|
|
|
|
return asImpl().visitCastExpr(ce);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
// Handle the comma operator.
|
|
|
|
} else if (auto op = dyn_cast<BinaryOperator>(e)) {
|
|
|
|
return asImpl().visitBinaryOperator(op);
|
2011-09-10 09:16:55 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
// TODO: handle conditional operators here
|
2011-09-10 09:16:55 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
// For calls and message sends, use the retained-call logic.
|
|
|
|
// Delegate inits are a special case in that they're the only
|
|
|
|
// returns-retained expression that *isn't* surrounded by
|
|
|
|
// a consume.
|
|
|
|
} else if (isa<CallExpr>(e) ||
|
|
|
|
(isa<ObjCMessageExpr>(e) &&
|
|
|
|
!cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
|
|
|
|
return asImpl().visitCall(e);
|
2011-09-10 09:16:55 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
// Look through pseudo-object expressions.
|
|
|
|
} else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
|
|
|
|
return asImpl().visitPseudoObjectExpr(pseudo);
|
|
|
|
}
|
2011-09-10 09:16:55 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
return asImpl().visitExpr(e);
|
|
|
|
}
|
2011-09-10 09:16:55 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
namespace {
|
2011-07-07 14:58:02 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
/// An emitter for +1 results.
|
|
|
|
struct ARCRetainExprEmitter :
|
|
|
|
public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}
|
|
|
|
|
|
|
|
llvm::Value *getValueOfResult(TryEmitResult result) {
|
|
|
|
return result.getPointer();
|
|
|
|
}
|
|
|
|
|
|
|
|
TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
|
|
|
|
llvm::Value *value = result.getPointer();
|
|
|
|
value = CGF.Builder.CreateBitCast(value, resultType);
|
|
|
|
result.setPointer(value);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
TryEmitResult visitLValueToRValue(const Expr *e) {
|
|
|
|
return tryEmitARCRetainLoadOfScalar(CGF, e);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// For consumptions, just emit the subexpression and thus elide
|
|
|
|
/// the retain/release pair.
|
|
|
|
TryEmitResult visitConsumeObject(const Expr *e) {
|
|
|
|
llvm::Value *result = CGF.EmitScalarExpr(e);
|
|
|
|
return TryEmitResult(result, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Block extends are net +0. Naively, we could just recurse on
|
|
|
|
/// the subexpression, but actually we need to ensure that the
|
|
|
|
/// value is copied as a block, so there's a little filter here.
|
|
|
|
TryEmitResult visitExtendBlockObject(const Expr *e) {
|
|
|
|
llvm::Value *result; // will be a +0 value
|
|
|
|
|
|
|
|
// If we can't safely assume the sub-expression will produce a
|
|
|
|
// block-copied value, emit the sub-expression at +0.
|
|
|
|
if (shouldEmitSeparateBlockRetain(e)) {
|
|
|
|
result = CGF.EmitScalarExpr(e);
|
|
|
|
|
|
|
|
// Otherwise, try to emit the sub-expression at +1 recursively.
|
|
|
|
} else {
|
|
|
|
TryEmitResult subresult = asImpl().visit(e);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
// If that produced a retained value, just use that.
|
|
|
|
if (subresult.getInt()) {
|
|
|
|
return subresult;
|
2011-11-06 17:01:30 +08:00
|
|
|
}
|
2016-01-28 02:32:30 +08:00
|
|
|
|
|
|
|
// Otherwise it's +0.
|
|
|
|
result = subresult.getPointer();
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
// Retain the object as a block.
|
|
|
|
result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
|
|
|
|
return TryEmitResult(result, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// For reclaims, emit the subexpression as a retained call and
|
|
|
|
/// skip the consumption.
|
|
|
|
TryEmitResult visitReclaimReturnedObject(const Expr *e) {
|
|
|
|
llvm::Value *result = emitARCRetainCallResult(CGF, e);
|
|
|
|
return TryEmitResult(result, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// When we have an undecorated call, retroactively do a claim.
|
|
|
|
TryEmitResult visitCall(const Expr *e) {
|
|
|
|
llvm::Value *result = emitARCRetainCallResult(CGF, e);
|
|
|
|
return TryEmitResult(result, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: maybe special-case visitBinAssignWeak?
|
|
|
|
|
|
|
|
TryEmitResult visitExpr(const Expr *e) {
|
|
|
|
// We didn't find an obvious production, so emit what we've got and
|
|
|
|
// tell the caller that we didn't manage to retain.
|
|
|
|
llvm::Value *result = CGF.EmitScalarExpr(e);
|
|
|
|
return TryEmitResult(result, false);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
2016-01-28 02:32:30 +08:00
|
|
|
};
|
|
|
|
}
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
static TryEmitResult
|
|
|
|
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
|
|
|
|
return ARCRetainExprEmitter(CGF).visit(e);
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
|
|
|
|
LValue lvalue,
|
|
|
|
QualType type) {
|
|
|
|
TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
|
|
|
|
llvm::Value *value = result.getPointer();
|
|
|
|
if (!result.getInt())
|
|
|
|
value = CGF.EmitARCRetain(type, value);
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// EmitARCRetainScalarExpr - Semantically equivalent to
|
|
|
|
/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
|
|
|
|
/// best-effort attempt to peephole expressions that naturally produce
|
|
|
|
/// retained objects.
|
|
|
|
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
|
2013-02-12 08:25:08 +08:00
|
|
|
// The retain needs to happen within the full-expression.
|
|
|
|
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
|
|
|
|
enterFullExpression(cleanups);
|
|
|
|
RunCleanupsScope scope(*this);
|
|
|
|
return EmitARCRetainScalarExpr(cleanups->getSubExpr());
|
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
|
|
|
|
llvm::Value *value = result.getPointer();
|
|
|
|
if (!result.getInt())
|
|
|
|
value = EmitARCRetain(e->getType(), value);
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Value *
|
|
|
|
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
|
2013-02-12 08:25:08 +08:00
|
|
|
// The retain needs to happen within the full-expression.
|
|
|
|
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
|
|
|
|
enterFullExpression(cleanups);
|
|
|
|
RunCleanupsScope scope(*this);
|
|
|
|
return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
|
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
|
|
|
|
llvm::Value *value = result.getPointer();
|
|
|
|
if (result.getInt())
|
|
|
|
value = EmitARCAutorelease(value);
|
|
|
|
else
|
|
|
|
value = EmitARCRetainAutorelease(e->getType(), value);
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
2011-10-04 14:23:45 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
|
|
|
|
llvm::Value *result;
|
|
|
|
bool doRetain;
|
|
|
|
|
|
|
|
if (shouldEmitSeparateBlockRetain(e)) {
|
|
|
|
result = EmitScalarExpr(e);
|
|
|
|
doRetain = true;
|
|
|
|
} else {
|
|
|
|
TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
|
|
|
|
result = subresult.getPointer();
|
|
|
|
doRetain = !subresult.getInt();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (doRetain)
|
|
|
|
result = EmitARCRetainBlock(result, /*mandatory*/ true);
|
|
|
|
return EmitObjCConsumeObject(e->getType(), result);
|
|
|
|
}
|
|
|
|
|
2011-10-01 18:32:24 +08:00
|
|
|
llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
|
|
|
|
// In ARC, retain and autorelease the expression.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (getLangOpts().ObjCAutoRefCount) {
|
2011-10-01 18:32:24 +08:00
|
|
|
// Do so before running any cleanups for the full-expression.
|
2013-02-12 08:25:08 +08:00
|
|
|
// EmitARCRetainAutoreleaseScalarExpr does this for us.
|
2011-10-01 18:32:24 +08:00
|
|
|
return EmitARCRetainAutoreleaseScalarExpr(expr);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, use the normal scalar-expression emission. The
|
|
|
|
// exception machinery doesn't do anything special with the
|
|
|
|
// exception like retaining it, so there's no safety associated with
|
|
|
|
// only running cleanups after the throw has started, and when it
|
|
|
|
// matters it tends to be substantially inferior code.
|
|
|
|
return EmitScalarExpr(expr);
|
|
|
|
}
|
|
|
|
|
2016-01-28 02:32:30 +08:00
|
|
|
namespace {

/// An emitter for assigning into an __unsafe_unretained context.
///
/// CRTP specialization of ARCExprEmitter whose result type is a plain
/// llvm::Value*.  Each hook below deliberately avoids introducing any
/// extra retains; the visit driver lives in the ARCExprEmitter base
/// (declared elsewhere in this file).
struct ARCUnsafeUnretainedExprEmitter :
    public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  /// The carried result is already the scalar value itself.
  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  /// Cast an intermediate result to \p resultType without other effects.
  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  /// Loads require no ARC decoration; plain scalar emission suffices.
  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions.  (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};

}
|
|
|
|
|
|
|
|
/// Drive the __unsafe_unretained CRTP emitter over \p e.
static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  ARCUnsafeUnretainedExprEmitter emitter(CGF);
  return emitter.visit(e);
}
|
|
|
|
|
|
|
|
/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
/// immediately releasing the result of EmitARCRetainScalarExpr, but
/// avoiding any spurious retains, including by performing reclaims
/// with objc_unsafeClaimAutoreleasedReturnValue.
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
  // Look through full-expressions.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    // The cleanups scope runs the full-expression cleanups when it is
    // destroyed, i.e. after the subexpression has been emitted.
    RunCleanupsScope scope(*this);
    return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
  }

  return emitARCUnsafeUnretainedScalarExpr(*this, e);
}
|
|
|
|
|
|
|
|
std::pair<LValue,llvm::Value*>
|
|
|
|
CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
|
|
|
|
bool ignored) {
|
|
|
|
// Evaluate the RHS first. If we're ignoring the result, assume
|
|
|
|
// that we can emit at an unsafe +0.
|
|
|
|
llvm::Value *value;
|
|
|
|
if (ignored) {
|
|
|
|
value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
|
|
|
|
} else {
|
|
|
|
value = EmitScalarExpr(e->getRHS());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Emit the LHS and perform the store.
|
|
|
|
LValue lvalue = EmitLValue(e->getLHS());
|
|
|
|
EmitStoreOfScalar(value, lvalue);
|
|
|
|
|
|
|
|
return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
|
|
|
|
}
|
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
/// Emit an assignment into a __strong l-value.
///
/// Returns the LHS l-value and the value that was stored.  When
/// \p ignored is set, the caller does not need the result value.
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  // Records whether the RHS was already produced at +1 (retained).
  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  // Only now is it safe to emit the LHS.
  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    // We own a +1 reference already: load the old value, store the new
    // one, then release the old (honoring precise-lifetime semantics).
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    // Otherwise, defer to the l-value overload of EmitARCStoreStrong
    // to perform the store with the appropriate retain/release pairing.
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}
|
|
|
|
|
|
|
|
std::pair<LValue,llvm::Value*>
|
|
|
|
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
|
|
|
|
llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
|
|
|
|
LValue lvalue = EmitLValue(e->getLHS());
|
|
|
|
|
2011-12-03 12:14:32 +08:00
|
|
|
EmitStoreOfScalar(value, lvalue);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
return std::pair<LValue,llvm::Value*>(lvalue, value);
|
|
|
|
}
|
|
|
|
|
|
|
|
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
|
2012-03-30 01:31:31 +08:00
|
|
|
const ObjCAutoreleasePoolStmt &ARPS) {
|
2011-06-16 07:02:42 +08:00
|
|
|
const Stmt *subStmt = ARPS.getSubStmt();
|
|
|
|
const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
|
|
|
|
|
|
|
|
CGDebugInfo *DI = getDebugInfo();
|
2011-10-14 05:45:18 +08:00
|
|
|
if (DI)
|
|
|
|
DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
|
2011-06-16 07:02:42 +08:00
|
|
|
|
|
|
|
// Keep track of the current cleanup stack depth.
|
|
|
|
RunCleanupsScope Scope(*this);
|
2012-08-21 10:47:43 +08:00
|
|
|
if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
|
2011-06-16 07:02:42 +08:00
|
|
|
llvm::Value *token = EmitObjCAutoreleasePoolPush();
|
|
|
|
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
|
|
|
|
} else {
|
|
|
|
llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
|
|
|
|
EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
|
|
|
|
}
|
|
|
|
|
2014-03-17 22:19:37 +08:00
|
|
|
for (const auto *I : S.body())
|
|
|
|
EmitStmt(I);
|
2011-06-16 07:02:42 +08:00
|
|
|
|
2011-10-14 05:45:18 +08:00
|
|
|
if (DI)
|
|
|
|
DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
|
2011-06-16 07:02:42 +08:00
|
|
|
}
|
2011-06-25 07:21:27 +08:00
|
|
|
|
|
|
|
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
|
|
|
|
/// make sure it survives garbage collection until this point.
|
|
|
|
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
|
|
|
|
// We just use an inline assembly.
|
|
|
|
llvm::FunctionType *extenderType
|
2012-02-17 11:33:10 +08:00
|
|
|
= llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
|
2011-06-25 07:21:27 +08:00
|
|
|
llvm::Value *extender
|
|
|
|
= llvm::InlineAsm::get(extenderType,
|
|
|
|
/* assembly */ "",
|
|
|
|
/* constraints */ "r",
|
|
|
|
/* side effects */ true);
|
|
|
|
|
|
|
|
object = Builder.CreateBitCast(object, VoidPtrTy);
|
2013-03-01 03:01:20 +08:00
|
|
|
EmitNounwindRuntimeCall(extender, object);
|
2011-06-25 07:21:27 +08:00
|
|
|
}
|
|
|
|
|
2012-01-10 08:37:01 +08:00
|
|
|
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a c++ object type with
/// non-trivial copy assignment function, produce following helper function.
/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
/// Returns null when no helper is needed (not C++, runtime lacks atomic
/// copy helpers, non-record ivar type, non-atomic property, or trivial
/// setter).  Helpers are cached per-type on the CodeGenModule.
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                        const ObjCPropertyImplDecl *PID) {
  // Only applicable in C++ mode with a runtime that uses these helpers.
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  // A trivial setter needs no out-of-line copy helper.
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  // Reuse a previously generated helper for the same type, if any.
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  // Build a synthetic `static void __assign_helper_atomic_property_(
  //   Ty *dst, const Ty *src)` function declaration.
  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
  FunctionDecl *FD = FunctionDecl::Create(C,
                                          C.getTranslationUnitDecl(),
                                          SourceLocation(),
                                          SourceLocation(), II, C.VoidTy,
                                          nullptr, SC_Static,
                                          false,
                                          false);

  // Parameter types: Ty* destination and const Ty* source.
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
                            DestTy, ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
                            SrcTy, ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  // Create the IR function with internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__assign_helper_atomic_property_",
                           &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, C.VoidTy, Fn, FI, args);

  // Build the AST for `*dst = *src` on the stack.  Note these nodes
  // must outlive the EmitStmt call below, so their declaration order
  // here is load-bearing.
  DeclRefExpr DstExpr(&DstDecl, false, DestTy,
                      VK_RValue, SourceLocation());
  UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  DeclRefExpr SrcExpr(&SrcDecl, false, SrcTy,
                      VK_RValue, SourceLocation());
  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  // Reuse the callee from the setter's recorded assignment expression,
  // so the property type's own operator= is invoked.
  Expr *Args[2] = { &DST, &SRC };
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
                              Args, DestTy->getPointeeType(),
                              VK_LValue, SourceLocation(), FPOptions());

  EmitStmt(&TheCall);

  FinishFunction();
  // Cache and return the helper as an i8* constant.
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
|
|
|
|
|
|
|
|
/// GenerateObjCAtomicGetterCopyHelperFunction - Given a C++ object type
/// with a non-trivial copy constructor, produce a helper of the form
///   static void copyHelper(Ty *dest, const Ty *source)
/// that copy-constructs *dest from *source, for use by atomic getters.
///
/// Returns null when no helper is needed (not C++, runtime lacks atomic
/// copy helpers, non-record property type, non-atomic property, or
/// trivial getter).  Helpers are cached per-type on the CodeGenModule.
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
                                            const ObjCPropertyImplDecl *PID) {
  // Only applicable in C++ mode with a runtime that uses these helpers.
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  QualType Ty = PD->getType();
  if (!Ty->isRecordType())
    return nullptr;
  if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;

  // A trivial getter needs no out-of-line copy helper.
  if (hasTrivialGetExpr(PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  // Reuse a previously generated helper for the same type, if any.
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  // Build a synthetic `static void __copy_helper_atomic_property_(
  //   Ty *dst, const Ty *src)` function declaration.
  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
  FunctionDecl *FD = FunctionDecl::Create(C,
                                          C.getTranslationUnitDecl(),
                                          SourceLocation(),
                                          SourceLocation(), II, C.VoidTy,
                                          nullptr, SC_Static,
                                          false,
                                          false);

  // Parameter types: Ty* destination and const Ty* source.
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
                            DestTy, ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
                            SrcTy, ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  // Create the IR function with internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__copy_helper_atomic_property_", &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, C.VoidTy, Fn, FI, args);

  // Build `*src` as the first constructor argument.  These stack AST
  // nodes must outlive the EmitAggExpr call below.
  DeclRefExpr SrcExpr(&SrcDecl, false, SrcTy,
                      VK_RValue, SourceLocation());

  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  CXXConstructExpr *CXXConstExpr =
    cast<CXXConstructExpr>(PID->getGetterCXXConstructor());

  // Replace the recorded construct-expr's first argument with *src,
  // keeping any remaining (e.g. defaulted) arguments.
  SmallVector<Expr*, 4> ConstructorArgs;
  ConstructorArgs.push_back(&SRC);
  ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
                         CXXConstExpr->arg_end());

  // Clone the construct-expr with the substituted argument list but the
  // same constructor and construction flags.
  CXXConstructExpr *TheCXXConstructExpr =
    CXXConstructExpr::Create(C, Ty, SourceLocation(),
                             CXXConstExpr->getConstructor(),
                             CXXConstExpr->isElidable(),
                             ConstructorArgs,
                             CXXConstExpr->hadMultipleCandidates(),
                             CXXConstExpr->isListInitialization(),
                             CXXConstExpr->isStdInitListInitialization(),
                             CXXConstExpr->requiresZeroInitialization(),
                             CXXConstExpr->getConstructionKind(),
                             SourceRange());

  DeclRefExpr DstExpr(&DstDecl, false, DestTy,
                      VK_RValue, SourceLocation());

  // Evaluate `dst` and construct the copy directly into *dst.
  RValue DV = EmitAnyExpr(&DstExpr);
  CharUnits Alignment
    = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
  EmitAggExpr(TheCXXConstructExpr,
              AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
                                    Qualifiers(),
                                    AggValueSlot::IsDestructed,
                                    AggValueSlot::DoesNotNeedGCBarriers,
                                    AggValueSlot::IsNotAliased,
                                    AggValueSlot::DoesNotOverlap));

  FinishFunction();
  // Cache and return the helper as an i8* constant.
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
|
|
|
|
|
2012-02-28 09:08:45 +08:00
|
|
|
/// Emit the equivalent of `[[Block copy] autorelease]` by sending the
/// two nullary messages through the configured Objective-C runtime.
llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
  ASTContext &Ctx = getContext();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();

  // Build the nullary selectors for -copy and -autorelease.
  Selector CopySel =
      Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("copy"));
  Selector AutoreleaseSel =
      Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("autorelease"));

  // [Block copy]
  llvm::Value *Copied =
      Runtime.GenerateMessageSend(*this, ReturnValueSlot(), Ty, CopySel,
                                  Block, CallArgList(), nullptr, nullptr)
          .getScalarVal();

  // [copied autorelease]
  return Runtime.GenerateMessageSend(*this, ReturnValueSlot(), Ty,
                                     AutoreleaseSel, Copied, CallArgList(),
                                     nullptr, nullptr)
      .getScalarVal();
}
|
|
|
|
|
2017-02-24 05:08:08 +08:00
|
|
|
/// Emit a call implementing __builtin_available: compare the requested
/// (major, minor, subminor) triple against the running OS version.
llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
  assert(Args.size() == 3 && "Expected 3 argument here!");

  // Lazily declare `i32 __isOSVersionAtLeast(i32, i32, i32)` and cache
  // the declaration on the module.
  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *VersionFnTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(VersionFnTy, "__isOSVersionAtLeast");
  }

  // Call it and reduce the i32 result to an i1: nonzero means the
  // version requirement is satisfied.
  llvm::Value *VersionCheck =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);
  return Builder.CreateICmpNE(VersionCheck,
                              llvm::Constant::getNullValue(Int32Ty));
}
|
2012-01-06 08:29:35 +08:00
|
|
|
|
2017-03-23 19:14:27 +08:00
|
|
|
void CodeGenModule::emitAtAvailableLinkGuard() {
|
|
|
|
if (!IsOSVersionAtLeastFn)
|
|
|
|
return;
|
|
|
|
// @available requires CoreFoundation only on Darwin.
|
|
|
|
if (!Target.getTriple().isOSDarwin())
|
|
|
|
return;
|
|
|
|
// Add -framework CoreFoundation to the linker commands. We still want to
|
|
|
|
// emit the core foundation reference down below because otherwise if
|
|
|
|
// CoreFoundation is not used in the code, the linker won't link the
|
|
|
|
// framework.
|
|
|
|
auto &Context = getLLVMContext();
|
|
|
|
llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
|
|
|
|
llvm::MDString::get(Context, "CoreFoundation")};
|
|
|
|
LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
|
|
|
|
// Emit a reference to a symbol from CoreFoundation to ensure that
|
|
|
|
// CoreFoundation is linked into the final binary.
|
|
|
|
llvm::FunctionType *FTy =
|
|
|
|
llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
|
|
|
|
llvm::Constant *CFFunc =
|
|
|
|
CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");
|
|
|
|
|
|
|
|
llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
|
|
|
|
llvm::Function *CFLinkCheckFunc = cast<llvm::Function>(CreateBuiltinFunction(
|
|
|
|
CheckFTy, "__clang_at_available_requires_core_foundation_framework"));
|
|
|
|
CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
|
|
|
|
CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
|
|
|
|
CodeGenFunction CGF(*this);
|
|
|
|
CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
|
|
|
|
CGF.EmitNounwindRuntimeCall(CFFunc, llvm::Constant::getNullValue(VoidPtrTy));
|
|
|
|
CGF.Builder.CreateUnreachable();
|
|
|
|
addCompilerUsedGlobal(CFLinkCheckFunc);
|
|
|
|
}
|
|
|
|
|
2015-10-20 21:23:58 +08:00
|
|
|
// Out-of-line definition anchoring CGObjCRuntime's virtual destructor.
CGObjCRuntime::~CGObjCRuntime() = default;