//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

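// Illustrative usage sketch (added commentary, not part of the original
// source): a caller that needs a scratch slot typically writes something like
//   llvm::AllocaInst *Tmp = CreateTempAlloca(Int32Ty, "tmp");
//   Builder.CreateStore(SomeValue, Tmp);
// Placing the alloca at AllocaInsertPt keeps all allocas in the entry block,
// which is what later passes such as mem2reg expect.
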
void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy);
}

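// Note (added commentary): member pointers are special-cased above because
// their null representation is ABI-specific (e.g. -1 for Itanium data member
// pointers), so a plain scalar-to-bool conversion would be wrong; the C++ ABI
// object knows how to test them for null.
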
/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
                                    bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggSlot, IgnoreResult);
  return AggSlot.asRValue();
}

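// Illustrative mapping (added commentary) of the three result kinds:
//   int x = f();              // scalar    -> RValue::get
//   _Complex double z = g();  // complex   -> RValue::getComplex
//   struct S s = h();         // aggregate -> emitted into AggSlot
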
/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

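// Note (added commentary): unlike EmitAnyExprToTemp, the caller supplies the
// destination address here, so the result can be emitted directly into
// storage the caller already owns instead of into a fresh "agg.tmp" alloca.
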
namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
struct SubobjectAdjustment {
  enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

  union {
    struct {
      const CastExpr *BasePath;
      const CXXRecordDecl *DerivedClass;
    } DerivedToBase;

    FieldDecl *Field;
  };

  SubobjectAdjustment(const CastExpr *BasePath,
                      const CXXRecordDecl *DerivedClass)
    : Kind(DerivedToBaseAdjustment) {
    DerivedToBase.BasePath = BasePath;
    DerivedToBase.DerivedClass = DerivedClass;
  }

  SubobjectAdjustment(FieldDecl *Field)
    : Kind(FieldAdjustment) {
    this->Field = Field;
  }
};
}

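// Illustrative examples (added commentary) of the two adjustment kinds:
//   const Base &b = Derived();    // derived-to-base adjustment
//   const int  &i = makeS().x;    // field adjustment (makeS() is hypothetical)
// The adjustments are recorded while walking the initializer and replayed
// after the temporary has been emitted.
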
static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

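// Note (added commentary): for a reference with static storage duration, e.g.
// "const int &r = 42;" at namespace scope, the temporary must also have
// static storage, hence the internal-linkage global above; for locals a plain
// "ref.tmp" alloca suffices.
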
static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through single-element init lists that claim to be lvalues. They're
  // just syntactic wrappers in this case.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits() == 1 && ILE->isGLValue())
      E = ILE->getInit(0);
  }

  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV =
            CGF.EmitLValueForField(Object, Adjustment.Field, 0);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }
        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

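// Illustrative case (added commentary): for
//   const std::string &s = makeString();   // makeString() is hypothetical
// the call result is emitted into a reference temporary, the non-trivial
// destructor is reported back through ReferenceTemporaryDtor, and the caller
// arranges for it to run when the reference's lifetime ends.
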
RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      EmitCXXGlobalDtorRegistration(DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
                  cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, destroyARCWeak, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // This needs to be in the standard address space.
  Address = Builder.CreateBitCast(Address, Int8PtrTy);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = Builder.getFalse();
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}

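// Note (added commentary): this check only fires when CatchUndefined is set
// (the -fcatch-undefined-behavior style checking of this era). It asks
// llvm.objectsize for the accessible size at Address; a result of -1 means
// "unknown" and is accepted, otherwise the object must be at least Size bytes
// or control branches to the trap block.
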
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

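// Illustrative semantics (added commentary): for "_Complex double z; z++;"
// only the real part is incremented; the imaginary part is carried through
// unchanged, and the pre/post distinction selects whether the updated or the
// original pair is the result.
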
//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(),
              getContext().getTypeSizeInChars(E->getType()).getQuantity());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
           "Only single-element init list can be lvalue.");
    return EmitLValue(cast<InitListExpr>(E)->getInit(0));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::LambdaExprClass:
    return EmitLambdaLValue(cast<LambdaExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const RecordType *RT = dyn_cast<RecordType>(type))
    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

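// Illustrative classification (added commentary):
//   const int N = 4;           -> CEK_AsValueOrReference is not needed; the
//                                 plain const object type gives CEK_AsValueOnly
//   const int &R = ...;        -> CEK_AsValueOrReference (pointee is const int)
//   int &S = ...;              -> CEK_AsReferenceOnly
//   volatile const int V = 0;  -> CEK_None
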
/// Try to emit a reference to the given value without producing it as
/// an l-value. This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

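// Illustrative case (added commentary): in
//   const int n = 4;
//   auto f = [=] { return n + 1; };   // 'n' is never odr-used, so not captured
// a use of 'n' inside the lambda body can be folded to the constant 4 here,
// which is why the comment above calls this more than an optimization.
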
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment().getQuantity(),
                          lvalue.getType(), lvalue.getTBAAInfo());
}

|
|
|
|
|
2012-03-25 00:50:34 +08:00
|
|
|
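/// Return true if values of this type are represented as a boolean: the
/// 'bool' type itself, an enum whose underlying integer type is bool, or an
/// _Atomic wrapping either of those.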
static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

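/// Compute the [Lo, Hi) pair to attach as !range metadata to a load of the
/// given type, or return NULL if no useful range is known.  For a bool-like
/// load this is the half-open range [0, 2) on i8; for a regular (non-fixed)
/// C++ enum under -fstrict-enums it is derived from the number of positive
/// and negative bits of the enumerators.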
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
                                 CGM.getCodeGenOpts().StrictEnums &&
                                 !ET->getDecl()->isFixed());
  bool IsBool = hasBooleanRepresentation(Ty);
  llvm::Type *LTy;
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return NULL;

  llvm::APInt Min;
  llvm::APInt End;
  if (IsBool) {
    Min = llvm::APInt(8, 0);
    End = llvm::APInt(8, 2);
    LTy = Int8Ty;
  } else {
    const EnumDecl *ED = ET->getDecl();
    LTy = ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }

  if (End == Min)
    return NULL;

  llvm::Value *LowAndHigh[2];
  LowAndHigh[0] = llvm::ConstantInt::get(LTy, Min);
  LowAndHigh[1] = llvm::ConstantInt::get(LTy, End);

  llvm::LLVMContext &C = getLLVMContext();
  llvm::MDNode *Range = llvm::MDNode::get(C, LowAndHigh);
  return Range;
}

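/// Load a scalar of type Ty from Addr, honoring volatility, alignment, and
/// TBAA information, making the load atomic for _Atomic types, attaching
/// range metadata when optimizing, and converting the result from its
/// in-memory representation to its register representation.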
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);
  // If this is an atomic type, all normal reads must be atomic
  if (Ty->isAtomicType())
    Load->setAtomic(llvm::SequentiallyConsistent);

  if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}

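/// Convert a scalar value from its register representation to the form it is
/// stored with in memory.  Currently this only matters for bool-like types,
/// which are widened from i1 to i8 (so, for example, 'true' is stored as an
/// i8 with value 1).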
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}

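/// Convert a scalar value from its in-memory representation back to its
/// register representation; the inverse of EmitToMemory.  Bool-like values
/// are truncated from i8 back to i1.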
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

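/// Store a scalar value of type Ty to Addr, honoring volatility, alignment,
/// and TBAA information, and making the store atomic for _Atomic types unless
/// this store is an initialization.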
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {
  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents an lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

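/// Load the value of a bit-field lvalue.  The bit-field may be split across
/// one or more memory accesses (the CGBitFieldInfo components); each piece is
/// loaded, shifted, and masked, and the pieces are OR'd together.  As an
/// illustration (not a guarantee of the exact access policy), a 13-bit field
/// that straddles a byte boundary may be assembled from two narrower loads.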
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
                       CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Load->setAlignment(AI.AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
                                            LV.isVolatileQualified());
  Load->setAlignment(LV.getAlignment().getQuantity());
  llvm::Value *Vec = Load;

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assignment through a __weak lvalue.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assignment through a __strong lvalue.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

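/// Store Src into the bit-field described by Dst, performing a
/// read-modify-write of each memory access component so the surrounding bits
/// are preserved.  If Result is non-null, it receives the new value of the
/// bit-field, truncated to its width and sign-extended if the field is
/// signed.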
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (hasBooleanRepresentation(Dst.getType()))
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access, limited
    // to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (!AI.AccessAlignment.isZero())
        Load->setAlignment(AI.AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Store->setAlignment(AI.AccessAlignment.getQuantity());
  }
}

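/// Store through an ext-vector component lvalue, e.g. (illustratively) a
/// swizzle assignment such as 'V.yx = W': the destination vector is loaded,
/// the source elements are shuffled or inserted into the accessed positions,
/// and the merged vector is stored back.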
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                            Dst.isVolatileQualified());
  Load->setAlignment(Dst.getAlignment().getQuantity());
  llvm::Value *Vec = Load;
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // If the source and destination have the same number of elements, use a
      // shuffle vector, restoring the stored-side ordering of the accessed
      // elements.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(Builder.getInt32(i));
      ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // build identity
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(Builder.getInt32(i));

      // then overwrite the elements that get shuffled in from the source
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
                                               Dst.isVolatileQualified());
  Store->setAlignment(Dst.getAlignment().getQuantity());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of
      // this struct follows gcc's behavior and conservatively makes it a
      // non-ivar write-barrier.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself.  {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself.  {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

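/// Bitcast the given pointer to a pointer to IRType, preserving its address
/// space.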
static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  QualType T = E->getType();
  LValue LV;
  if (VD->getType()->isReferenceType()) {
    llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
    LI->setAlignment(Alignment.getQuantity());
    V = LI;
    LV = CGF.MakeNaturalAlignAddrLValue(V, T);
  } else {
    LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
  }
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
      QualType NoProtoType =
          CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  CharUnits Alignment = getContext().getDeclAlign(ND);
  QualType T = E->getType();

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool isBlockVariable = VD->hasAttr<BlocksAttr>();

    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !isBlockVariable;

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);

    // Use special handling for lambdas.
    if (!V) {
      if (FieldDecl *FD = LambdaCaptureFields.lookup(VD))
        return EmitLValueForField(CXXABIThisValue, FD, 0);

      assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
      CharUnits alignment = getContext().getDeclAlign(VD);
      return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
                            E->getType(), alignment);
    }

    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    if (isBlockVariable)
      V = BuildBlockByrefAddress(V, VD);

    LValue LV;
    if (VD->getType()->isReferenceType()) {
      llvm::LoadInst *LI = Builder.CreateLoad(V);
      LI->setAlignment(Alignment.getQuantity());
      V = LI;
      LV = MakeNaturalAlignAddrLValue(V, T);
    } else {
      LV = MakeAddrLValue(V, T, Alignment);
    }

    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOpts().ObjC1 &&
        getContext().getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // __real is valid on scalars.  This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !cast<llvm::PointerType>(Addr->getType())
           ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType());
}

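/// EmitPredefinedLValue - Emit an lvalue for a predefined identifier such as
/// __func__, __FUNCTION__, or __PRETTY_FUNCTION__ by materializing the
/// computed function name as a constant C string.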
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");

  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction: {
    unsigned Type = E->getIdentType();
    std::string GlobalVarName;

    switch (Type) {
    default: llvm_unreachable("Invalid type");
    case PredefinedExpr::Func:
      GlobalVarName = "__func__.";
      break;
    case PredefinedExpr::Function:
      GlobalVarName = "__FUNCTION__.";
      break;
    case PredefinedExpr::PrettyFunction:
      GlobalVarName = "__PRETTY_FUNCTION__.";
      break;
    }

    StringRef FnName = CurFn->getName();
    if (FnName.startswith("\01"))
      FnName = FnName.substr(1);
    GlobalVarName += FnName;

    const Decl *CurDecl = CurCodeDecl;
    if (CurDecl == 0)
      CurDecl = getContext().getTranslationUnitDecl();

    std::string FunctionName =
        (isa<BlockDecl>(CurDecl)
         ? FnName.str()
         : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));

    llvm::Constant *C =
      CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
    return MakeAddrLValue(C, E->getType());
  }
  }
}

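/// getTrapBB - Create (or, when optimizing, reuse) a basic block that calls
/// the llvm.trap intrinsic and ends in unreachable.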
llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the function
  // to the same call; that way, in the debugger, they can see which operation
  // did in fact fail.  If we are optimizing, we collapse all calls to trap down
  // to just one per function to save on codesize.
  if (GCO.OptimizationLevel && TrapBB)
    return TrapBB;

  llvm::BasicBlock *Cont = 0;
  if (HaveInsertPoint()) {
    Cont = createBasicBlock("cont");
    EmitBranch(Cont);
  }
  TrapBB = createBasicBlock("trap");
  EmitBlock(TrapBB);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
  llvm::CallInst *TrapCall = Builder.CreateCall(F);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();

  if (Cont)
    EmitBlock(Cont);
  return TrapBB;
}

/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const CastExpr *CE = dyn_cast<CastExpr>(E);
  if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
    return 0;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return 0;

  return SubExpr;
}

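/// EmitArraySubscriptExpr - Emit an lvalue for an array subscript, handling
/// vector element access, VLAs, Objective-C interface indexing, and direct
/// array decay as special cases of the pointer arithmetic.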
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate.  Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType(), LHS.getAlignment());
  }

  // Extend or truncate the index type to 32 or 64-bits.
  if (Idx->getType() != IntPtrTy)
    Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

  // FIXME: As llvm implements the object size checking, this can come out.
  if (CatchUndefined) {
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
      if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
        if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
          if (const ConstantArrayType *CAT
              = getContext().getAsConstantArrayType(DRE->getType())) {
            llvm::APInt Size = CAT->getSize();
            llvm::BasicBlock *Cont = createBasicBlock("cont");
            Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
                                  llvm::ConstantInt::get(Idx->getType(), Size)),
                                 Cont, getTrapBB());
            EmitBlock(Cont);
          }
        }
      }
    }
  }

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  CharUnits ArrayAlignment;
  if (const VariableArrayType *vla =
        getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address = EmitScalarExpr(E->getBase());

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).first;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
      Address = Builder.CreateGEP(Address, Idx, "arrayidx");
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
      Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
    }
  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
          getContext().getTypeSizeInChars(OIT).getQuantity());

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    // The base must be a pointer, which is not an aggregate.  Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    Address = EmitCastToVoidPtr(Base);
    Address = Builder.CreateGEP(Address, Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV = EmitLValue(Array);
    llvm::Value *ArrayPtr = ArrayLV.getAddress();
    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
    llvm::Value *Args[] = { Zero, Idx };

    // Propagate the alignment from the array itself to the result.
    ArrayAlignment = ArrayLV.getAlignment();

    if (getContext().getLangOpts().isSignedOverflowDefined())
      Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
  } else {
    // The base must be a pointer, which is not an aggregate.  Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    if (getContext().getLangOpts().isSignedOverflowDefined())
      Address = Builder.CreateGEP(Base, Idx, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  // Limit the alignment to that of the result type.
  LValue LV;
  if (!ArrayAlignment.isZero()) {
    CharUnits Align = getContext().getTypeAlignInChars(T);
    ArrayAlignment = std::min(Align, ArrayAlignment);
    LV = MakeAddrLValue(Address, T, ArrayAlignment);
  } else {
    LV = MakeNaturalAlignAddrLValue(Address, T);
  }

  LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());

  if (getContext().getLangOpts().ObjC1 &&
      getContext().getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

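/// GenerateConstantVector - Build a constant vector of i32 values from a list
/// of element indices, used as the element selector for an ext-vector lvalue.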
static
llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
                                       SmallVector<unsigned, 4> &Elts) {
  SmallVector<llvm::Constant*, 4> CElts;
  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(Builder.getInt32(Elts[i]));

  return llvm::ConstantVector::get(CElts);
}

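/// EmitExtVectorElementExpr - Emit an lvalue for an ext-vector element access
/// such as V.xyzw, encoding the accessed elements as a constant index vector.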
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType());
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType());
  }

  QualType type =
    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getAlignment());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
                                  Base.getAlignment());
}

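/// EmitMemberExpr - Emit an lvalue for a member access (s.x or s->x),
/// dispatching to field, global variable, or function emission based on the
/// referenced declaration.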
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  Qualifiers BaseQuals;

  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAs<PointerType>();
    BaseQuals = PTy->getPointeeType().getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    QualType BaseTy = BaseExpr->getType();
    BaseQuals = BaseTy.getQualifiers();
  }

  NamedDecl *ND = E->getMemberDecl();
  if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseValue, Field,
                                   BaseQuals.getCVRQualifiers());
    LV.setNonGC(isNonGC);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (VarDecl *VD = dyn_cast<VarDecl>(ND))
    return EmitGlobalVarDeclLValue(*this, E, VD);

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

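/// EmitLValueForBitfield - Emit a bitfield lvalue using the CGRecordLayout
/// information for the field's parent record.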
LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
                                              const FieldDecl *Field,
                                              unsigned CVRQualifiers) {
  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
  return LValue::MakeBitfield(BaseValue, Info,
                        Field->getType().withCVRQualifiers(CVRQualifiers));
}

/// EmitLValueForAnonRecordField - Given that the field is a member of
/// an anonymous struct or union buried inside a record, and given
/// that the base value is a pointer to the enclosing record, derive
/// an lvalue for the ultimate field.
LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
                                              const IndirectFieldDecl *Field,
                                                     unsigned CVRQualifiers) {
  IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
    IEnd = Field->chain_end();
  while (true) {
    LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I),
                                   CVRQualifiers);
    if (++I == IEnd) return LV;

    assert(LV.isSimple());
    BaseValue = LV.getAddress();
    CVRQualifiers |= LV.getVRQualifiers();
  }
}

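/// EmitLValueForField - Emit an lvalue for a struct or union field, loading
/// through reference members and applying alignment, TBAA, and qualifier
/// information.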
LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
                                           const FieldDecl *field,
                                           unsigned cvr) {
  if (field->isBitField())
    return EmitLValueForBitfield(baseAddr, field, cvr);

  const RecordDecl *rec = field->getParent();
  QualType type = field->getType();
  CharUnits alignment = getContext().getDeclAlign(field);

  bool mayAlias = rec->hasAttr<MayAliasAttr>();

  llvm::Value *addr = baseAddr;
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    assert(!type->isReferenceType() && "union has reference member");
  } else {
    // For structs, we GEP to the field that the record layout suggests.
    unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
    addr = Builder.CreateStructGEP(addr, idx, field->getName());

    // If this is a reference field, load the reference right now.
    if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
      llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
      if (cvr & Qualifiers::Volatile) load->setVolatile(true);
      load->setAlignment(alignment.getQuantity());

      if (CGM.shouldUseTBAA()) {
        llvm::MDNode *tbaa;
        if (mayAlias)
          tbaa = CGM.getTBAAInfo(getContext().CharTy);
        else
          tbaa = CGM.getTBAAInfo(type);
        CGM.DecorateInstruction(load, tbaa);
      }

      addr = load;
      mayAlias = false;
      type = refType->getPointeeType();
      if (type->isIncompleteType())
        alignment = CharUnits();
      else
        alignment = getContext().getTypeAlignInChars(type);
      cvr = 0; // qualifiers don't recursively apply to referencee
    }
  }

  // Make sure that the address is pointing to the right type.  This is critical
  // for both unions and structs.  A union needs a bitcast, a struct element
  // will need a bitcast if the LLVM type laid out doesn't match the desired
  // type.
  addr = EmitBitCastOfLValueToProperType(*this, addr,
                                         CGM.getTypes().ConvertTypeForMem(type),
                                         field->getName());

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, type, alignment);
  LV.getQuals().addCVRQualifiers(cvr);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  // Fields of may_alias structs act like 'char' for TBAA purposes.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  if (mayAlias && LV.getTBAAInfo())
    LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));

  return LV;
}

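/// EmitLValueForFieldInitialization - Like EmitLValueForField, but for use
/// when initializing the field; reference-typed fields yield the address of
/// the reference slot itself rather than the referenced object.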
LValue
CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
                                                  const FieldDecl *Field,
                                                  unsigned CVRQualifiers) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(BaseValue, Field, CVRQualifiers);

  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  unsigned idx = RL.getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx);
  assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");

  // Make sure that the address is pointing to the right type.  This is critical
  // for both unions and structs.  A union needs a bitcast, a struct element
  // will need a bitcast if the LLVM type laid out doesn't match the desired
  // type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));

  CharUnits Alignment = getContext().getDeclAlign(Field);
  return MakeAddrLValue(V, FieldType, Alignment);
}

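/// EmitCompoundLiteralLValue - Emit a compound literal either as a constant
/// global (at file scope) or as an initialized local temporary.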
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
  if (E->isFileScope()) {
    llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType());
  }

  llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType());

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  return Result;
}

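/// EmitConditionalOperatorLValue - Emit a glvalue conditional operator by
/// branching on the condition and joining the two branch addresses with a phi.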
LValue CodeGenFunction::
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert((hasAggregateLLVMType(expr->getType()) &&
            !expr->getType()->isAnyComplexType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);

  const Expr *condExpr = expr->getCond();
  bool CondExprBool;
  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
    if (!CondExprBool) std::swap(live, dead);

    if (!ContainsLabel(dead))
      return EmitLValue(live);
  }

  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");

  ConditionalEvaluation eval(*this);
  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);

  // Any temporaries created here are conditional.
  EmitBlock(lhsBlock);
  eval.begin(*this);
  LValue lhs = EmitLValue(expr->getTrueExpr());
  eval.end(*this);

  if (!lhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");

  lhsBlock = Builder.GetInsertBlock();
  Builder.CreateBr(contBlock);

  // Any temporaries created here are conditional.
  EmitBlock(rhsBlock);
  eval.begin(*this);
  LValue rhs = EmitLValue(expr->getFalseExpr());
  eval.end(*this);
  if (!rhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");
  rhsBlock = Builder.GetInsertBlock();

  EmitBlock(contBlock);

  llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
                                         "cond-lvalue");
  phi->addIncoming(lhs.getAddress(), lhsBlock);
  phi->addIncoming(rhs.getAddress(), rhsBlock);
  return MakeAddrLValue(phi, expr->getType());
}

/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
/// If the cast is a dynamic_cast, we can have the usual lvalue result.
/// Otherwise, if a cast is needed by the code generator in an lvalue context,
/// it must mean that we need the address of an aggregate in order to access
/// one of its fields.  This can happen for all the reasons that casts are
/// permitted with an aggregate result, including noop aggregate casts and
/// casts from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  // These two casts are currently treated as no-ops, although they could
  // potentially be real operations depending on the target's ABI.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:

  case CK_NoOp:
  case CK_LValueToRValue:
    if (!E->getSubExpr()->Classify(getContext()).isPRValue()
        || E->getType()->isRecordType())
      return EmitLValue(E->getSubExpr());
    // Fall through to synthesize a temporary.

  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject: {
    // These casts only produce lvalues when we're binding a reference to a
    // temporary realized from a (converted) pure rvalue.  Emit the expression
    // as a value, copy it into a temporary, and return an lvalue referring to
    // that temporary.
    llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
    EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
    return MakeAddrLValue(V, E->getType());
  }

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
    return EmitLValue(E->getSubExpr());

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *This = LV.getAddress();

    // Perform the derived-to-base conversion.
    llvm::Value *Base =
      GetAddressOfBaseClass(This, DerivedClassDecl,
                            E->path_begin(), E->path_end(),
                            /*NullCheckValue=*/false);

    return MakeAddrLValue(Base, E->getType());
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion.
    llvm::Value *Derived =
      GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                               E->path_begin(), E->path_end(),
                               /*NullCheckValue=*/false);

    return MakeAddrLValue(Derived, E->getType());
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or C-style equivalent).
    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(CE->getTypeAsWritten()));
    return MakeAddrLValue(V, E->getType());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType ToType = getContext().getLValueReferenceType(E->getType());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(ToType));
    return MakeAddrLValue(V, E->getType());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

LValue CodeGenFunction::EmitNullInitializationLValue(
                                              const CXXScalarValueInitExpr *E) {
  QualType Ty = E->getType();
  LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
  EmitNullInitialization(LV.getAddress(), Ty);
  return LV;
}

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOpaqueLValueMapping(e);
}

LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
                                           const MaterializeTemporaryExpr *E) {
  RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, E->getLocStart());

  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  const Decl *TargetDecl = E->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    if (unsigned builtinID = FD->getBuiltinID())
      return EmitBuiltinExpr(FD, builtinID, E);
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (const CXXPseudoDestructorExpr *PseudoDtor
          = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    QualType DestroyedType = PseudoDtor->getDestroyedType();
    if (getContext().getLangOpts().ObjCAutoRefCount &&
        DestroyedType->isObjCLifetimeType() &&
        (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
         DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
      // Automatic Reference Counting:
      //   If the pseudo-expression names a retainable object with weak or
      //   strong lifetime, the object shall be released.
      Expr *BaseExpr = PseudoDtor->getBase();
      llvm::Value *BaseValue = NULL;
      Qualifiers BaseQuals;

      // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
      if (PseudoDtor->isArrow()) {
        BaseValue = EmitScalarExpr(BaseExpr);
        const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
        BaseQuals = PTy->getPointeeType().getQualifiers();
      } else {
        LValue BaseLV = EmitLValue(BaseExpr);
        BaseValue = BaseLV.getAddress();
        QualType BaseTy = BaseExpr->getType();
        BaseQuals = BaseTy.getQualifiers();
      }

      switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        break;

      case Qualifiers::OCL_Strong:
        EmitARCRelease(Builder.CreateLoad(BaseValue,
                          PseudoDtor->getDestroyedType().isVolatileQualified()),
                       /*precise*/ true);
        break;

      case Qualifiers::OCL_Weak:
        EmitARCDestroyWeak(BaseValue);
        break;
      }
    } else {
      // C++ [expr.pseudo]p1:
      //   The result shall only be used as the operand for the function call
      //   operator (), and the result of such a call has type void. The only
      //   effect is the evaluation of the postfix-expression before the dot or
      //   arrow.
      EmitScalarExpr(E->getCallee());
    }

    return RValue::get(0);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

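/// EmitBinaryOperatorLValue - Emit an lvalue for a binary operator; only
/// comma, pointer-to-member, and assignment operators produce lvalues here.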
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  if (!hasAggregateLLVMType(E->getType())) {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitLValue(E->getLHS());
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  if (E->getType()->isAnyComplexType())
    return EmitComplexAssignmentLValue(E);

  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  EmitLambdaExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  llvm::Value *V =
    CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
  return MakeAddrLValue(V, E->getType());
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}

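/// EmitCall - Emit a call through a function pointer of the given type,
/// arranging the arguments and casting unprototyped callees to the type of
/// the promoted arguments.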
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type.  The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const FunctionType *FnType
    = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().arrangeFunctionCall(Args, FnType);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call.  The way we make this work is to cast to the exact type
  // of the promoted arguments.
  if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BO_PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
    = E->getRHS()->getType()->getAs<MemberPointerType>();

  llvm::Value *AddV =
    CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);

  return MakeAddrLValue(AddV, MPT->getPointeeType());
}

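/// EmitAtomicOp - Lower one __c11_atomic_* / __atomic_* builtin to the
/// corresponding LLVM atomic operation (cmpxchg, atomic load/store, or
/// atomicrmw) at the given memory ordering.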
static void
|
|
|
|
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
|
|
|
|
llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
|
|
|
|
uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
|
2012-04-12 13:08:17 +08:00
|
|
|
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
|
|
|
|
llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
|
|
|
|
|
|
|
|
switch (E->getOp()) {
|
|
|
|
case AtomicExpr::AO__c11_atomic_init:
|
|
|
|
llvm_unreachable("Already handled!");
|
|
|
|
|
|
|
|
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
|
|
|
|
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
|
|
|
|
case AtomicExpr::AO__atomic_compare_exchange:
|
|
|
|
case AtomicExpr::AO__atomic_compare_exchange_n: {
|
2011-10-11 10:20:01 +08:00
|
|
|
// Note that cmpxchg only supports specifying one ordering and
|
|
|
|
// doesn't support weak cmpxchg, at least at the moment.
|
|
|
|
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
|
|
|
|
LoadVal1->setAlignment(Align);
|
|
|
|
llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
|
|
|
|
LoadVal2->setAlignment(Align);
|
|
|
|
llvm::AtomicCmpXchgInst *CXI =
|
|
|
|
CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
|
|
|
|
CXI->setVolatile(E->isVolatile());
|
|
|
|
llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
|
|
|
|
StoreVal1->setAlignment(Align);
|
|
|
|
llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
|
|
|
|
CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
|
|
|
|
return;
|
|
|
|
}
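  // Sketch of the compare-exchange lowering above: the expected value is
  // loaded from Val1, a cmpxchg is issued against Ptr, the value observed in
  // memory is written back to Val1 (so the caller sees it on failure), and
  // the success flag (observed == expected) is stored to Dest.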

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);

  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
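// Worked example for the read-modify-write tail above (illustrative only):
// "__atomic_add_fetch(p, n, order)" becomes an "atomicrmw add" that yields
// the *old* value, followed by an ordinary add of n to recover the *new*
// value that the *_fetch builtins return; "__atomic_fetch_add" skips the
// extra add and stores the atomicrmw result directly.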

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}
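// Usage note (a sketch of how this is used below): EmitAtomicExpr spills
// operand values such as E->getVal1() into these ".atomictmp" slots so that
// EmitAtomicOp and the __atomic_* libcalls can take every operand by address.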

static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
                                  llvm::Value *Dest) {
  if (Ty->isAnyComplexType())
    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
  if (CGF.hasAggregateLLVMType(Ty))
    return RValue::getAggregate(Dest);
  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidth =
      getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align || Size > MaxInlineWidth);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    if (!hasAggregateLLVMType(E->getVal1()->getType())) {
      QualType PointeeType
        = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
      EmitScalarInit(EmitScalarExpr(E->getVal1()),
                     LValue::MakeAddr(Ptr, PointeeType, alignChars,
                                      getContext()));
    } else if (E->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
    } else {
      AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
                                                AtomicTy.getQualifiers(),
                                                AggValueSlot::IsNotDestructed,
                                                AggValueSlot::DoesNotNeedGCBarriers,
                                                AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
    }
    return RValue::get(0);
  }
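  // Example for the __c11_atomic_init path above (illustrative only):
  // "__c11_atomic_init(&x, 42)" for "_Atomic(int) x" is emitted as a plain,
  // non-atomic initializing store, since C11 specifies that initialization of
  // an atomic object is not itself an atomic operation.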

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
        getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
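  // Example for the pointer-arithmetic special case above (illustrative
  // only): with "_Atomic(int *) p;", the call "__c11_atomic_fetch_add(&p, 2,
  // order)" scales the operand to 2 * sizeof(int) bytes before the atomicrmw,
  // matching ordinary C pointer arithmetic.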

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    llvm::SmallVector<QualType, 5> Params;
    CallArgList Args;
    // Size is always the first parameter
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // Atomic address is always the second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char* LibCallName;
    QualType RetTy = getContext().VoidTy;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers.  It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:  LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }
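  // Example of when the libcall path above is taken (a sketch): the inline
  // path requires the size to match the alignment and to fit within the
  // target's maximum inline atomic width, so an under-aligned _Atomic type or
  // one wider than the target supports is lowered to calls such as
  // __atomic_load, __atomic_store and __atomic_compare_exchange instead of
  // inline instructions.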

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }
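  // Example for the constant-order fast path above (illustrative only):
  // "__c11_atomic_load(&x, memory_order_acquire)" has a constant ordering, so
  // it maps directly onto a single acquire atomic load with no runtime
  // dispatch; memory_order_consume is conservatively strengthened to acquire.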

  // Long case, when Order isn't obviously constant.

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
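// Example for the runtime-order path above (illustrative only): for
// "__c11_atomic_store(&x, v, ord)" where "ord" is an ordinary int variable,
// the emitted IR switches on "ord" over blocks named "monotonic", "release"
// and "seqcst" (acquire and acq_rel are invalid for a store and get no
// block), each performing the store with the corresponding ordering before
// branching to "atomic.continue".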

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::Value *ULPs = llvm::ConstantFP::get(Builder.getFloatTy(), Accuracy);
  llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), ULPs);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy,
                                            Node);
}
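// Usage sketch (illustrative; the exact caller depends on language options):
// scalar expression codegen can call SetFPAccuracy on an OpenCL
// single-precision divide to attach "!fpaccuracy" metadata such as
// "!{float 2.500000e+00}", telling later passes and the backend that a
// 2.5 ulp result is acceptable for that particular instruction.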

namespace {
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}
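// Example of what reaches the function above (illustrative only): an
// Objective-C property compound assignment such as "obj.count += 1" is a
// PseudoObjectExpr whose semantic expressions cover the getter call, the
// addition, and the setter call, with OpaqueValueExprs standing in for the
// receiver and the fetched value so that each is evaluated exactly once.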

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}