//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
|
2007-08-11 08:04:45 +08:00
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Aggregate Expression Emitter
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
2009-11-29 03:45:26 +08:00
|
|
|
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
|
2007-08-21 12:25:47 +08:00
|
|
|
CodeGenFunction &CGF;
|
2008-11-01 09:53:16 +08:00
|
|
|
CGBuilderTy &Builder;
|
2010-09-15 18:14:12 +08:00
|
|
|
AggValueSlot Dest;
|
2010-05-22 09:48:05 +08:00
|
|
|
|
2011-08-26 07:04:34 +08:00
|
|
|
/// We want to use 'dest' as the return slot except under two
|
|
|
|
/// conditions:
|
|
|
|
/// - The destination slot requires garbage collection, so we
|
|
|
|
/// need to use the GC API.
|
|
|
|
/// - The destination slot is potentially aliased.
|
|
|
|
bool shouldUseDestForReturnSlot() const {
|
|
|
|
return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
|
|
|
|
}
|
|
|
|
|
2010-05-22 09:48:05 +08:00
|
|
|
ReturnValueSlot getReturnValueSlot() const {
|
2011-08-26 07:04:34 +08:00
|
|
|
if (!shouldUseDestForReturnSlot())
|
|
|
|
return ReturnValueSlot();
|
2010-05-23 06:13:32 +08:00
|
|
|
|
2010-09-15 18:14:12 +08:00
|
|
|
return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
|
|
|
|
}
|
|
|
|
|
|
|
|
AggValueSlot EnsureSlot(QualType T) {
|
|
|
|
if (!Dest.isIgnored()) return Dest;
|
|
|
|
return CGF.CreateAggTemp(T, "agg.tmp.ensured");
|
2010-05-22 09:48:05 +08:00
|
|
|
}
|
2012-07-03 07:58:38 +08:00
|
|
|
void EnsureDest(QualType T) {
|
|
|
|
if (!Dest.isIgnored()) return;
|
|
|
|
Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
|
|
|
|
}
|
2010-05-23 06:13:32 +08:00
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
public:
|
2012-07-03 07:58:38 +08:00
|
|
|
AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
|
|
|
|
: CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
|
2007-08-21 12:25:47 +08:00
|
|
|
}
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
// Utilities
|
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
|
|
|
|
/// represents a value lvalue, this method emits the address of the lvalue,
|
|
|
|
/// then loads the result into DestPtr.
|
|
|
|
void EmitAggLoadOfLValue(const Expr *E);
|
2008-05-20 01:51:16 +08:00
|
|
|
|
2009-05-24 04:28:01 +08:00
|
|
|
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
|
2012-07-03 07:58:38 +08:00
|
|
|
void EmitFinalDestCopy(QualType type, const LValue &src);
|
|
|
|
void EmitFinalDestCopy(QualType type, RValue src,
|
|
|
|
CharUnits srcAlignment = CharUnits::Zero());
|
|
|
|
void EmitCopy(QualType type, const AggValueSlot &dest,
|
|
|
|
const AggValueSlot &src);
|
2009-05-24 04:28:01 +08:00
|
|
|
|
2011-08-26 07:04:34 +08:00
|
|
|
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
|
2010-05-23 06:13:32 +08:00
|
|
|
|
2012-02-17 16:42:25 +08:00
|
|
|
void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
|
|
|
|
QualType elementType, InitListExpr *E);
|
|
|
|
|
2011-08-26 04:40:09 +08:00
|
|
|
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
|
2012-03-11 15:00:24 +08:00
|
|
|
if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
|
2011-08-26 04:40:09 +08:00
|
|
|
return AggValueSlot::NeedsGCBarriers;
|
|
|
|
return AggValueSlot::DoesNotNeedGCBarriers;
|
|
|
|
}
|
|
|
|
|
2010-05-23 06:13:32 +08:00
|
|
|
bool TypeRequiresGCollection(QualType T);
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
// Visitor Methods
|
|
|
|
//===--------------------------------------------------------------------===//
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2015-01-18 09:48:19 +08:00
|
|
|
void Visit(Expr *E) {
|
DebugInfo: Use the preferred location rather than the start location for expression line info
This causes things like assignment to refer to the '=' rather than the
LHS when attributing the store instruction, for example.
There were essentially 3 options for this:
* The beginning of an expression (this was the behavior prior to this
commit). This meant that stepping through subexpressions would bounce
around from subexpressions back to the start of the outer expression,
etc. (eg: x + y + z would go x, y, x, z, x (the repeated 'x's would be
where the actual addition occurred)).
* The end of an expression. This seems to be what GCC does /mostly/, and
certainly this for function calls. This has the advantage that
progress is always 'forwards' (never jumping backwards - except for
independent subexpressions if they're evaluated in interesting orders,
etc). "x + y + z" would go "x y z" with the additions occurring at y
and z after the respective loads.
The problem with this is that the user would still have to think
fairly hard about precedence to realize which subexpression is being
evaluated or which operator overload is being called in, say, an asan
backtrace.
* The preferred location or 'exprloc'. In this case you get sort of what
you'd expect, though it's a bit confusing in its own way due to going
'backwards'. In this case the locations would be: "x y + z +" in
lovely postfix arithmetic order. But this does mean that if the op+
were an operator overload, say, and in a backtrace, the backtrace will
point to the exact '+' that's being called, not to the end of one of
its operands.
(actually the operator overload case doesn't work yet for other reasons,
but that's being fixed - but this at least gets scalar/complex
assignments and other plain operators right)
llvm-svn: 227027
2015-01-25 09:19:10 +08:00
|
|
|
ApplyDebugLocation DL(CGF, E);
|
2015-01-18 09:48:19 +08:00
|
|
|
StmtVisitor<AggExprEmitter>::Visit(E);
|
|
|
|
}
|
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
void VisitStmt(Stmt *S) {
|
2008-08-16 08:56:44 +08:00
|
|
|
CGF.ErrorUnsupported(S, "aggregate expression");
|
2007-08-21 12:25:47 +08:00
|
|
|
}
|
|
|
|
void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
|
2011-04-15 08:35:48 +08:00
|
|
|
void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
|
|
|
|
Visit(GE->getResultExpr());
|
|
|
|
}
|
2009-01-27 17:03:41 +08:00
|
|
|
void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
|
2011-07-15 13:09:51 +08:00
|
|
|
void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
|
|
|
|
return Visit(E->getReplacement());
|
|
|
|
}
|
2007-08-21 12:25:47 +08:00
|
|
|
|
|
|
|
// l-values.
|
2012-03-10 17:33:50 +08:00
|
|
|
void VisitDeclRefExpr(DeclRefExpr *E) {
|
2012-03-10 11:05:10 +08:00
|
|
|
// For aggregates, we should always be able to emit the variable
|
|
|
|
// as an l-value unless it's a reference. This is due to the fact
|
|
|
|
// that we can't actually ever see a normal l2r conversion on an
|
|
|
|
// aggregate in C++, and in C there's no language standard
|
|
|
|
// actively preventing us from listing variables in the captures
|
|
|
|
// list of a block.
|
2012-03-10 17:33:50 +08:00
|
|
|
if (E->getDecl()->getType()->isReferenceType()) {
|
2012-03-10 11:05:10 +08:00
|
|
|
if (CodeGenFunction::ConstantEmission result
|
2012-03-10 17:33:50 +08:00
|
|
|
= CGF.tryEmitAsConstant(E)) {
|
2012-07-03 07:58:38 +08:00
|
|
|
EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
|
2012-03-10 11:05:10 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-10 17:33:50 +08:00
|
|
|
EmitAggLoadOfLValue(E);
|
2012-03-10 11:05:10 +08:00
|
|
|
}
|
|
|
|
|
2007-12-14 10:04:12 +08:00
|
|
|
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
|
|
|
|
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
|
2010-01-05 02:47:06 +08:00
|
|
|
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
|
2011-06-17 12:59:12 +08:00
|
|
|
void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
|
2007-12-14 10:04:12 +08:00
|
|
|
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
|
|
|
|
EmitAggLoadOfLValue(E);
|
|
|
|
}
|
2009-04-22 07:00:09 +08:00
|
|
|
void VisitPredefinedExpr(const PredefinedExpr *E) {
|
2009-09-09 23:08:12 +08:00
|
|
|
EmitAggLoadOfLValue(E);
|
2009-04-22 07:00:09 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
// Operators.
|
2009-08-08 07:22:37 +08:00
|
|
|
void VisitCastExpr(CastExpr *E);
|
2007-11-01 06:04:46 +08:00
|
|
|
void VisitCallExpr(const CallExpr *E);
|
2007-09-01 06:54:14 +08:00
|
|
|
void VisitStmtExpr(const StmtExpr *E);
|
2007-08-21 12:25:47 +08:00
|
|
|
void VisitBinaryOperator(const BinaryOperator *BO);
|
2009-10-23 06:57:31 +08:00
|
|
|
void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
|
2007-08-21 12:43:17 +08:00
|
|
|
void VisitBinAssign(const BinaryOperator *E);
|
2008-05-20 15:56:31 +08:00
|
|
|
void VisitBinComma(const BinaryOperator *E);
|
2007-08-21 12:25:47 +08:00
|
|
|
|
2008-06-25 01:04:18 +08:00
|
|
|
void VisitObjCMessageExpr(ObjCMessageExpr *E);
|
2008-08-23 18:51:21 +08:00
|
|
|
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
|
|
|
|
EmitAggLoadOfLValue(E);
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-02-17 18:25:35 +08:00
|
|
|
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
|
2009-07-09 02:33:14 +08:00
|
|
|
void VisitChooseExpr(const ChooseExpr *CE);
|
2007-10-27 01:44:44 +08:00
|
|
|
void VisitInitListExpr(InitListExpr *E);
|
2009-12-16 14:57:54 +08:00
|
|
|
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
|
2008-04-08 12:40:51 +08:00
|
|
|
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
|
|
|
|
Visit(DAE->getExpr());
|
|
|
|
}
|
2013-04-21 06:23:05 +08:00
|
|
|
void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
|
|
|
|
CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
|
|
|
|
Visit(DIE->getExpr());
|
|
|
|
}
|
2009-05-31 07:23:33 +08:00
|
|
|
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
|
2009-05-04 01:47:16 +08:00
|
|
|
void VisitCXXConstructExpr(const CXXConstructExpr *E);
|
2012-02-09 11:32:31 +08:00
|
|
|
void VisitLambdaExpr(LambdaExpr *E);
|
2013-06-13 06:31:48 +08:00
|
|
|
void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
|
2010-12-06 16:20:24 +08:00
|
|
|
void VisitExprWithCleanups(ExprWithCleanups *E);
|
2010-07-08 14:14:04 +08:00
|
|
|
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
|
2009-11-18 08:40:12 +08:00
|
|
|
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
|
2011-06-22 01:03:29 +08:00
|
|
|
void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
|
2011-02-16 16:02:54 +08:00
|
|
|
void VisitOpaqueValueExpr(OpaqueValueExpr *E);
|
|
|
|
|
2011-11-06 17:01:30 +08:00
|
|
|
void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
|
|
|
|
if (E->isGLValue()) {
|
|
|
|
LValue LV = CGF.EmitPseudoObjectLValue(E);
|
2012-07-03 07:58:38 +08:00
|
|
|
return EmitFinalDestCopy(E->getType(), LV);
|
2011-11-06 17:01:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
|
|
|
|
}
|
|
|
|
|
2008-05-27 23:51:49 +08:00
|
|
|
void VisitVAArgExpr(VAArgExpr *E);
|
2008-04-05 02:42:16 +08:00
|
|
|
|
2012-03-30 01:37:10 +08:00
|
|
|
void EmitInitializationToLValue(Expr *E, LValue Address);
|
2011-06-16 12:16:24 +08:00
|
|
|
void EmitNullInitializationToLValue(LValue Address);
|
2007-08-21 12:25:47 +08:00
|
|
|
// case Expr::ChooseExprClass:
|
2009-12-10 03:24:08 +08:00
|
|
|
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
|
2011-10-11 10:20:01 +08:00
|
|
|
void VisitAtomicExpr(AtomicExpr *E) {
|
|
|
|
CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
|
|
|
|
}
|
2007-08-21 12:25:47 +08:00
|
|
|
};
|
|
|
|
} // end anonymous namespace.
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Utilities
|
|
|
|
//===----------------------------------------------------------------------===//
|
2007-08-21 12:25:47 +08:00
|
|
|
|
2007-08-11 08:04:45 +08:00
|
|
|
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
|
|
|
|
/// represents a value lvalue, this method emits the address of the lvalue,
|
|
|
|
/// then loads the result into DestPtr.
|
2007-08-21 12:25:47 +08:00
|
|
|
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
|
|
|
|
LValue LV = CGF.EmitLValue(E);
|
2013-03-08 05:37:17 +08:00
|
|
|
|
|
|
|
// If the type of the l-value is atomic, then do an atomic load.
|
2015-02-14 09:35:12 +08:00
|
|
|
if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
|
2013-10-02 10:29:49 +08:00
|
|
|
CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
|
2013-03-08 05:37:17 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-07-03 07:58:38 +08:00
|
|
|
EmitFinalDestCopy(E->getType(), LV);
|
2009-05-24 04:28:01 +08:00
|
|
|
}
|
|
|
|
|
2010-05-23 06:13:32 +08:00
|
|
|
/// \brief True if the given aggregate type requires special GC API calls.
|
|
|
|
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
|
|
|
|
// Only record types have members that might require garbage collection.
|
|
|
|
const RecordType *RecordTy = T->getAs<RecordType>();
|
|
|
|
if (!RecordTy) return false;
|
|
|
|
|
|
|
|
// Don't mess with non-trivial C++ types.
|
|
|
|
RecordDecl *Record = RecordTy->getDecl();
|
|
|
|
if (isa<CXXRecordDecl>(Record) &&
|
2012-11-16 08:53:38 +08:00
|
|
|
(cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
|
2010-05-23 06:13:32 +08:00
|
|
|
!cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check whether the type has an object member.
|
|
|
|
return Record->hasObjectMember();
|
|
|
|
}
|
|
|
|
|
2011-08-26 07:04:34 +08:00
|
|
|
/// \brief Perform the final move to DestPtr if for some reason
|
|
|
|
/// getReturnValueSlot() didn't use it directly.
|
2010-05-23 06:13:32 +08:00
|
|
|
///
|
|
|
|
/// The idea is that you do something like this:
|
|
|
|
/// RValue Result = EmitSomething(..., getReturnValueSlot());
|
2011-08-26 07:04:34 +08:00
|
|
|
/// EmitMoveFromReturnSlot(E, Result);
|
|
|
|
///
|
|
|
|
/// If nothing interferes, this will cause the result to be emitted
|
|
|
|
/// directly into the return value slot. Otherwise, a final move
|
|
|
|
/// will be performed.
|
2012-07-03 07:58:38 +08:00
|
|
|
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
|
2011-08-26 07:04:34 +08:00
|
|
|
if (shouldUseDestForReturnSlot()) {
|
|
|
|
// Logically, Dest.getAddr() should equal Src.getAggregateAddr().
|
|
|
|
// The possibility of undef rvalues complicates that a lot,
|
|
|
|
// though, so we can't really assert.
|
|
|
|
return;
|
2010-06-16 06:44:06 +08:00
|
|
|
}
|
2011-08-26 07:04:34 +08:00
|
|
|
|
2012-07-03 07:58:38 +08:00
|
|
|
// Otherwise, copy from there to the destination.
|
|
|
|
assert(Dest.getAddr() != src.getAggregateAddr());
|
|
|
|
std::pair<CharUnits, CharUnits> typeInfo =
|
2012-04-17 09:14:29 +08:00
|
|
|
CGF.getContext().getTypeInfoInChars(E->getType());
|
2012-07-03 07:58:38 +08:00
|
|
|
EmitFinalDestCopy(E->getType(), src, typeInfo.second);
|
2010-05-23 06:13:32 +08:00
|
|
|
}
|
|
|
|
|
2009-05-24 04:28:01 +08:00
|
|
|
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
|
2012-07-03 07:58:38 +08:00
|
|
|
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
|
|
|
|
CharUnits srcAlign) {
|
|
|
|
assert(src.isAggregate() && "value must be aggregate value!");
|
|
|
|
LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
|
|
|
|
EmitFinalDestCopy(type, srcLV);
|
|
|
|
}
|
2009-05-24 04:28:01 +08:00
|
|
|
|
2012-07-03 07:58:38 +08:00
|
|
|
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
|
|
|
|
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
|
2010-09-15 18:14:12 +08:00
|
|
|
// If Dest is ignored, then we're evaluating an aggregate expression
|
2012-07-03 07:58:38 +08:00
|
|
|
// in a context that doesn't care about the result. Note that loads
|
|
|
|
// from volatile l-values force the existence of a non-ignored
|
|
|
|
// destination.
|
|
|
|
if (Dest.isIgnored())
|
|
|
|
return;
|
2010-10-23 06:05:03 +08:00
|
|
|
|
2012-07-03 07:58:38 +08:00
|
|
|
AggValueSlot srcAgg =
|
|
|
|
AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
|
|
|
|
needsGC(type), AggValueSlot::IsAliased);
|
|
|
|
EmitCopy(type, Dest, srcAgg);
|
|
|
|
}
|
2007-08-11 08:04:45 +08:00
|
|
|
|
2012-07-03 07:58:38 +08:00
|
|
|
/// Perform a copy from the source into the destination.
|
|
|
|
///
|
|
|
|
/// \param type - the type of the aggregate being copied; qualifiers are
|
|
|
|
/// ignored
|
|
|
|
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
|
|
|
|
const AggValueSlot &src) {
|
|
|
|
if (dest.requiresGCollection()) {
|
|
|
|
CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
|
|
|
|
llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
|
2009-09-01 03:33:16 +08:00
|
|
|
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
|
2012-07-03 07:58:38 +08:00
|
|
|
dest.getAddr(),
|
|
|
|
src.getAddr(),
|
|
|
|
size);
|
2009-09-01 03:33:16 +08:00
|
|
|
return;
|
|
|
|
}
|
2009-05-24 04:28:01 +08:00
|
|
|
|
2012-07-03 07:58:38 +08:00
|
|
|
// If the result of the assignment is used, copy the LHS there also.
|
|
|
|
// It's volatile if either side is. Use the minimum alignment of
|
|
|
|
// the two sides.
|
|
|
|
CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
|
|
|
|
dest.isVolatile() || src.isVolatile(),
|
|
|
|
std::min(dest.getAlignment(), src.getAlignment()));
|
2007-08-11 08:04:45 +08:00
|
|
|
}
|
|
|
|
|
2012-02-17 16:42:25 +08:00
|
|
|
/// \brief Emit the initializer for a std::initializer_list initialized with a
|
|
|
|
/// real initializer list.
|
2013-06-13 06:31:48 +08:00
|
|
|
void
|
|
|
|
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
|
|
|
|
// Emit an array containing the elements. The array is externally destructed
|
|
|
|
// if the std::initializer_list object is.
|
|
|
|
ASTContext &Ctx = CGF.getContext();
|
|
|
|
LValue Array = CGF.EmitLValue(E->getSubExpr());
|
|
|
|
assert(Array.isSimple() && "initializer_list array not a simple lvalue");
|
|
|
|
llvm::Value *ArrayPtr = Array.getAddress();
|
|
|
|
|
|
|
|
const ConstantArrayType *ArrayType =
|
|
|
|
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
|
|
|
|
assert(ArrayType && "std::initializer_list constructed from non-array");
|
|
|
|
|
|
|
|
// FIXME: Perform the checks on the field types in SemaInit.
|
|
|
|
RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
|
|
|
|
RecordDecl::field_iterator Field = Record->field_begin();
|
|
|
|
if (Field == Record->field_end()) {
|
|
|
|
CGF.ErrorUnsupported(E, "weird std::initializer_list");
|
2012-02-26 04:51:13 +08:00
|
|
|
return;
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Start pointer.
|
2013-06-13 06:31:48 +08:00
|
|
|
if (!Field->getType()->isPointerType() ||
|
|
|
|
!Ctx.hasSameType(Field->getType()->getPointeeType(),
|
|
|
|
ArrayType->getElementType())) {
|
|
|
|
CGF.ErrorUnsupported(E, "weird std::initializer_list");
|
2012-02-26 04:51:13 +08:00
|
|
|
return;
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
2013-06-13 06:31:48 +08:00
|
|
|
|
|
|
|
AggValueSlot Dest = EnsureSlot(E->getType());
|
|
|
|
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
|
|
|
|
Dest.getAlignment());
|
|
|
|
LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
|
|
|
|
llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
|
|
|
|
llvm::Value *IdxStart[] = { Zero, Zero };
|
|
|
|
llvm::Value *ArrayStart =
|
|
|
|
Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
|
|
|
|
CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
|
|
|
|
++Field;
|
|
|
|
|
|
|
|
if (Field == Record->field_end()) {
|
|
|
|
CGF.ErrorUnsupported(E, "weird std::initializer_list");
|
2012-02-26 04:51:13 +08:00
|
|
|
return;
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
2013-06-13 06:31:48 +08:00
|
|
|
|
|
|
|
llvm::Value *Size = Builder.getInt(ArrayType->getSize());
|
|
|
|
LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
|
|
|
|
if (Field->getType()->isPointerType() &&
|
|
|
|
Ctx.hasSameType(Field->getType()->getPointeeType(),
|
|
|
|
ArrayType->getElementType())) {
|
2012-02-17 16:42:25 +08:00
|
|
|
// End pointer.
|
2013-06-13 06:31:48 +08:00
|
|
|
llvm::Value *IdxEnd[] = { Zero, Size };
|
|
|
|
llvm::Value *ArrayEnd =
|
|
|
|
Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
|
|
|
|
CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
|
|
|
|
} else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
|
2012-02-17 16:42:25 +08:00
|
|
|
// Length.
|
2013-06-13 06:31:48 +08:00
|
|
|
CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
|
2012-02-17 16:42:25 +08:00
|
|
|
} else {
|
2013-06-13 06:31:48 +08:00
|
|
|
CGF.ErrorUnsupported(E, "weird std::initializer_list");
|
2012-02-26 04:51:13 +08:00
|
|
|
return;
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-14 07:04:49 +08:00
|
|
|
/// \brief Determine if E is a trivial array filler, that is, one that is
|
|
|
|
/// equivalent to zero-initialization.
|
|
|
|
static bool isTrivialFiller(Expr *E) {
|
|
|
|
if (!E)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (isa<ImplicitValueInitExpr>(E))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (auto *ILE = dyn_cast<InitListExpr>(E)) {
|
|
|
|
if (ILE->getNumInits())
|
|
|
|
return false;
|
|
|
|
return isTrivialFiller(ILE->getArrayFiller());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
|
|
|
|
return Cons->getConstructor()->isDefaultConstructor() &&
|
|
|
|
Cons->getConstructor()->isTrivial();
|
|
|
|
|
|
|
|
// FIXME: Are there other cases where we can avoid emitting an initializer?
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-02-17 16:42:25 +08:00
|
|
|
/// \brief Emit initialization of an array from an initializer list.
|
|
|
|
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
|
|
|
|
QualType elementType, InitListExpr *E) {
|
|
|
|
uint64_t NumInitElements = E->getNumInits();
|
|
|
|
|
|
|
|
uint64_t NumArrayElements = AType->getNumElements();
|
|
|
|
assert(NumInitElements <= NumArrayElements);
|
|
|
|
|
|
|
|
// DestPtr is an array*. Construct an elementType* by drilling
|
|
|
|
// down a level.
|
|
|
|
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
|
|
|
|
llvm::Value *indices[] = { zero, zero };
|
|
|
|
llvm::Value *begin =
|
|
|
|
Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
|
|
|
|
|
|
|
|
// Exception safety requires us to destroy all the
|
|
|
|
// already-constructed members if an initializer throws.
|
|
|
|
// For that, we'll need an EH cleanup.
|
|
|
|
QualType::DestructionKind dtorKind = elementType.isDestructedType();
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::AllocaInst *endOfInit = nullptr;
|
2012-02-17 16:42:25 +08:00
|
|
|
EHScopeStack::stable_iterator cleanup;
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::Instruction *cleanupDominator = nullptr;
|
2012-02-17 16:42:25 +08:00
|
|
|
if (CGF.needsEHCleanup(dtorKind)) {
|
|
|
|
// In principle we could tell the cleanup where we are more
|
|
|
|
// directly, but the control flow can get so varied here that it
|
|
|
|
// would actually be quite complex. Therefore we go through an
|
|
|
|
// alloca.
|
|
|
|
endOfInit = CGF.CreateTempAlloca(begin->getType(),
|
|
|
|
"arrayinit.endOfInit");
|
|
|
|
cleanupDominator = Builder.CreateStore(begin, endOfInit);
|
|
|
|
CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
|
|
|
|
CGF.getDestroyer(dtorKind));
|
|
|
|
cleanup = CGF.EHStack.stable_begin();
|
|
|
|
|
|
|
|
// Otherwise, remember that we didn't need a cleanup.
|
|
|
|
} else {
|
|
|
|
dtorKind = QualType::DK_none;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
|
|
|
|
|
|
|
|
// The 'current element to initialize'. The invariants on this
|
|
|
|
// variable are complicated. Essentially, after each iteration of
|
|
|
|
// the loop, it points to the last initialized element, except
|
|
|
|
// that it points to the beginning of the array before any
|
|
|
|
// elements have been initialized.
|
|
|
|
llvm::Value *element = begin;
|
|
|
|
|
|
|
|
// Emit the explicit initializers.
|
|
|
|
for (uint64_t i = 0; i != NumInitElements; ++i) {
|
|
|
|
// Advance to the next element.
|
|
|
|
if (i > 0) {
|
|
|
|
element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
|
|
|
|
|
|
|
|
// Tell the cleanup that it needs to destroy up to this
|
|
|
|
// element. TODO: some of these stores can be trivially
|
|
|
|
// observed to be unnecessary.
|
|
|
|
if (endOfInit) Builder.CreateStore(element, endOfInit);
|
|
|
|
}
|
|
|
|
|
2013-06-13 06:31:48 +08:00
|
|
|
LValue elementLV = CGF.MakeAddrLValue(element, elementType);
|
|
|
|
EmitInitializationToLValue(E->getInit(i), elementLV);
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check whether there's a non-trivial array-fill expression.
|
|
|
|
Expr *filler = E->getArrayFiller();
|
2014-06-14 07:04:49 +08:00
|
|
|
bool hasTrivialFiller = isTrivialFiller(filler);
|
2012-02-17 16:42:25 +08:00
|
|
|
|
|
|
|
// Any remaining elements need to be zero-initialized, possibly
|
|
|
|
// using the filler expression. We can skip this if the we're
|
|
|
|
// emitting to zeroed memory.
|
|
|
|
if (NumInitElements != NumArrayElements &&
|
|
|
|
!(Dest.isZeroed() && hasTrivialFiller &&
|
|
|
|
CGF.getTypes().isZeroInitializable(elementType))) {
|
|
|
|
|
|
|
|
// Use an actual loop. This is basically
|
|
|
|
// do { *array++ = filler; } while (array != end);
|
|
|
|
|
|
|
|
// Advance to the start of the rest of the array.
|
|
|
|
if (NumInitElements) {
|
|
|
|
element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
|
|
|
|
if (endOfInit) Builder.CreateStore(element, endOfInit);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the end of the array.
|
|
|
|
llvm::Value *end = Builder.CreateInBoundsGEP(begin,
|
|
|
|
llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
|
|
|
|
"arrayinit.end");
|
|
|
|
|
|
|
|
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
|
|
|
|
llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
|
|
|
|
|
|
|
|
// Jump into the body.
|
|
|
|
CGF.EmitBlock(bodyBB);
|
|
|
|
llvm::PHINode *currentElement =
|
|
|
|
Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
|
|
|
|
currentElement->addIncoming(element, entryBB);
|
|
|
|
|
|
|
|
// Emit the actual filler expression.
|
|
|
|
LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
|
|
|
|
if (filler)
|
2012-03-30 01:37:10 +08:00
|
|
|
EmitInitializationToLValue(filler, elementLV);
|
2012-02-17 16:42:25 +08:00
|
|
|
else
|
|
|
|
EmitNullInitializationToLValue(elementLV);
|
|
|
|
|
|
|
|
// Move on to the next element.
|
|
|
|
llvm::Value *nextElement =
|
|
|
|
Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
|
|
|
|
|
|
|
|
// Tell the EH cleanup that we finished with the last element.
|
|
|
|
if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
|
|
|
|
|
|
|
|
// Leave the loop if we're done.
|
|
|
|
llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
|
|
|
|
"arrayinit.done");
|
|
|
|
llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
|
|
|
|
Builder.CreateCondBr(done, endBB, bodyBB);
|
|
|
|
currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
|
|
|
|
|
|
|
|
CGF.EmitBlock(endBB);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Leave the partial-array cleanup if we entered one.
|
|
|
|
if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
|
|
|
|
}
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Visitor Methods
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2011-06-22 01:03:29 +08:00
|
|
|
void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
|
|
|
|
Visit(E->GetTemporaryExpr());
|
|
|
|
}
|
|
|
|
|
2011-02-16 16:02:54 +08:00
|
|
|
/// An opaque value's aggregate result was already evaluated; copy the
/// mapped l-value into the destination.
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}
|
|
|
|
|
2011-06-17 12:59:12 +08:00
|
|
|
void
|
|
|
|
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
|
2013-03-08 05:36:54 +08:00
|
|
|
if (Dest.isPotentiallyAliased() &&
|
|
|
|
E->getType().isPODType(CGF.getContext())) {
|
2011-06-18 00:37:20 +08:00
|
|
|
// For a POD type, just emit a load of the lvalue + a copy, because our
|
|
|
|
// compound literal might alias the destination.
|
|
|
|
EmitAggLoadOfLValue(E);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-06-17 12:59:12 +08:00
|
|
|
AggValueSlot Slot = EnsureSlot(E->getType());
|
|
|
|
CGF.EmitAggExpr(E->getInitializer(), Slot);
|
|
|
|
}
|
|
|
|
|
2013-03-08 05:37:17 +08:00
|
|
|
/// Attempt to look through various unimportant expressions to find a
|
|
|
|
/// cast of the given kind.
|
|
|
|
static Expr *findPeephole(Expr *op, CastKind kind) {
|
|
|
|
while (true) {
|
|
|
|
op = op->IgnoreParens();
|
|
|
|
if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
|
|
|
|
if (castE->getCastKind() == kind)
|
|
|
|
return castE->getSubExpr();
|
|
|
|
if (castE->getCastKind() == CK_NoOp)
|
|
|
|
continue;
|
|
|
|
}
|
2014-05-21 13:09:00 +08:00
|
|
|
return nullptr;
|
2013-03-08 05:37:17 +08:00
|
|
|
}
|
|
|
|
}
|
2011-06-17 12:59:12 +08:00
|
|
|
|
2009-08-08 07:22:37 +08:00
|
|
|
/// Emit a cast whose result is an aggregate.  Most scalar cast kinds are
/// invalid here; the interesting cases are CK_ToUnion, the atomic
/// conversions, and the "transparent" casts that simply recurse.
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension: initialize the union through a pointer to the
    // member's type.
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to intialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddr(), atomicType);

        // Build a GEP to refer to the subobject.
        llvm::Value *valueAddr =
            CGF.Builder.CreateStructGEP(nullptr, valueDest.getAddr(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getAlignment(),
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    // The non-atomic value lives in the first field of the padded temporary.
    llvm::Value *valueAddr =
      Builder.CreateStructGEP(nullptr, atomicSlot.getAddr(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    // fallthrough

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  // Everything below yields a scalar or complex value and can never
  // legitimately reach the aggregate emitter.
  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
  case CK_AddressSpaceConversion:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}
|
|
|
|
|
2008-07-27 06:37:01 +08:00
|
|
|
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
|
2015-02-26 01:36:15 +08:00
|
|
|
if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
|
2009-05-28 00:45:02 +08:00
|
|
|
EmitAggLoadOfLValue(E);
|
|
|
|
return;
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-05-23 06:13:32 +08:00
|
|
|
RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
|
2011-08-26 07:04:34 +08:00
|
|
|
EmitMoveFromReturnSlot(E, RV);
|
2008-01-31 13:38:29 +08:00
|
|
|
}
|
2008-07-27 06:37:01 +08:00
|
|
|
|
|
|
|
/// Emit an Objective-C message send whose result is an aggregate: evaluate
/// into the return-value slot and move the result to the destination.
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  EmitMoveFromReturnSlot(E, CGF.EmitObjCMessageExpr(E, getReturnValueSlot()));
}
|
2008-01-31 13:38:29 +08:00
|
|
|
|
2008-07-27 06:37:01 +08:00
|
|
|
/// Comma operator: evaluate the LHS for its side effects only, then emit
/// the RHS into the destination.
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}
|
|
|
|
|
2007-09-01 06:54:14 +08:00
|
|
|
/// GNU statement expression: emit the compound statement, directing its
/// final expression into our destination slot.
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  // RAII guard for statement-expression evaluation state.
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}
|
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
|
2010-08-25 19:45:40 +08:00
|
|
|
if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
|
2009-10-23 06:57:31 +08:00
|
|
|
VisitPointerToDataMemberBinaryOperator(E);
|
|
|
|
else
|
|
|
|
CGF.ErrorUnsupported(E, "aggregate binary expression");
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Emit a .* or ->* expression with an aggregate result: compute the member
/// lvalue, then copy it to the destination.
void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}
|
|
|
|
|
|
|
|
/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
///
/// Conservative syntactic walk: returns true only when the expression
/// visibly names (or derives an lvalue from) a variable with the blocks
/// attribute; returns false for anything it cannot analyze.
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  // Anything else: assume it is not a __block reference.
  return false;
}
|
|
|
|
|
2007-08-21 12:43:17 +08:00
|
|
|
/// Emit an aggregate assignment.  Evaluation order matters here: when the
/// LHS may live in a __block variable and the RHS has side effects, the RHS
/// must be evaluated first; atomic LHS types take the atomic-store path.
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}
|
|
|
|
|
2011-02-17 18:25:35 +08:00
|
|
|
/// Emit a (possibly binary) conditional operator with an aggregate result.
/// Both arms are emitted into the same destination slot, joined by an
/// explicit control-flow diamond.
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}
|
2007-08-21 12:59:27 +08:00
|
|
|
|
2009-07-09 02:33:14 +08:00
|
|
|
/// __builtin_choose_expr: the chosen arm was resolved in Sema; just emit it.
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}
|
|
|
|
|
2008-05-27 23:51:49 +08:00
|
|
|
/// Emit a va_arg of aggregate type.  Prefer the target-specific lowering;
/// if that fails, fall back to LLVM's generic va_arg instruction.
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    // If EmitVAArg fails, we fall back to the LLVM instruction.
    llvm::Value *Val =
        Builder.CreateVAArg(ArgValue, CGF.ConvertType(VE->getType()));
    if (!Dest.isIgnored())
      Builder.CreateStore(Val, Dest.getAddr());
    return;
  }

  // Target lowering produced an address; copy the argument out of it.
  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
|
|
|
|
|
2009-05-31 07:23:33 +08:00
|
|
|
/// Emit a C++ temporary binding: evaluate the subexpression into the slot
/// and, unless the caller already arranged destruction, push the
/// temporary's destructor cleanup.
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}
|
|
|
|
|
2009-04-17 08:06:03 +08:00
|
|
|
/// Emit a C++ constructor call directly into the destination slot.
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}
|
|
|
|
|
2012-02-09 11:32:31 +08:00
|
|
|
/// Emit a lambda expression (its closure object) into the destination slot.
void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}
|
|
|
|
|
2010-12-06 16:20:24 +08:00
|
|
|
/// Emit a full-expression: enter its cleanup scope, emit the subexpression,
/// and run the cleanups when the scope (RAII) exits.
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}
|
|
|
|
|
2010-07-08 14:14:04 +08:00
|
|
|
/// Emit T() for an aggregate T: value-initialize the destination slot.
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  EmitNullInitializationToLValue(
      CGF.MakeAddrLValue(Slot.getAddr(), E->getType()));
}
|
|
|
|
|
|
|
|
/// Emit an implicit value-initialization (e.g. for trailing aggregate
/// members with no explicit initializer): null-initialize the slot.
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}
|
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}
|
|
|
|
|
|
|
|
|
2010-02-04 01:33:16 +08:00
|
|
|
/// Emit the initializer E into the given lvalue, dispatching on the
/// evaluation kind of the lvalue's type.  Stores of zero into
/// already-zeroed memory are elided.
void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
|
2008-02-19 06:44:02 +08:00
|
|
|
|
2011-06-16 12:16:24 +08:00
|
|
|
/// Emit a null/zero initialization of the memory designated by the given
/// lvalue, skipping the work entirely when the destination is already zeroed.
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType Ty = lv.getType();

  // If the destination slot was already zeroed out before this aggregate was
  // emitted into it, and the type's zero value is all-zero bytes, there is
  // nothing left to store.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(Ty))
    return;

  if (!CGF.hasScalarEvaluationKind(Ty)) {
    // Aggregate case: defer to the generic null-initialization helper.
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
    return;
  }

  // Scalar case: materialize the appropriate null constant and store it.
  // Note that the following is not equivalent to
  // EmitStoreThroughBitfieldLValue for ARC types.
  llvm::Value *NullVal = CGF.CGM.EmitNullConstant(Ty);
  if (lv.isBitField()) {
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(NullVal), lv);
    return;
  }

  assert(lv.isSimple());
  CGF.EmitStoreOfScalar(NullVal, lv, /* isInitialization */ true);
}
|
2008-02-19 06:44:02 +08:00
|
|
|
|
2008-04-05 02:42:16 +08:00
|
|
|
/// Emit an InitListExpr into the destination slot. Arrays, _Atomic objects,
/// unions, and plain records each take a separate path; record fields are
/// initialized one at a time, with EH cleanups pushed for fields whose
/// partially-constructed values would need destruction on unwind.
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
                                     Dest.getAlignment());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    // A string literal initializer is emitted directly into the array.
    if (E->isStringLiteralInit())
      return Visit(E->getInit(0));

    QualType elementType =
        CGF.getContext().getAsArrayType(E->getType())->getElementType();

    // Recover the LLVM array type from the destination pointer so the
    // per-element emission can index into it.
    llvm::PointerType *APType =
      cast<llvm::PointerType>(Dest.getAddr()->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    EmitArrayInit(Dest.getAddr(), AType, elementType, E);
    return;
  }

  if (E->getType()->isAtomicType()) {
    // An _Atomic(T) object can be list-initialized from an expression
    // of the same type.
    assert(E->getNumInits() == 1 &&
           CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
                                                   E->getType()) &&
           "unexpected list initialization for atomic object");
    return Visit(E->getInit(0));
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;


    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        // Lazily create a placeholder instruction marking the point the
        // cleanups dominate; it is erased once cleanups are deactivated.
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Entry Points into this File
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
|
|
|
|
/// non-zero bytes that will be stored when outputting the initializer for the
|
|
|
|
/// specified initializer expression.
|
2011-04-25 01:17:56 +08:00
|
|
|
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
|
2011-04-15 08:35:48 +08:00
|
|
|
E = E->IgnoreParens();
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
// 0 and 0.0 won't require any non-zero stores!
|
2011-04-25 01:17:56 +08:00
|
|
|
if (isSimpleZero(E, CGF)) return CharUnits::Zero();
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
// If this is an initlist expr, sum up the size of sizes of the (present)
|
|
|
|
// elements. If this is something weird, assume the whole thing is non-zero.
|
|
|
|
const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
|
2011-04-25 01:17:56 +08:00
|
|
|
return CGF.getContext().getTypeSizeInChars(E->getType());
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
2010-12-03 02:29:00 +08:00
|
|
|
// InitListExprs for structs have to be handled carefully. If there are
|
|
|
|
// reference members, we need to consider the size of the reference, not the
|
|
|
|
// referencee. InitListExprs for unions and arrays can't have references.
|
2010-12-03 06:52:04 +08:00
|
|
|
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
|
|
|
|
if (!RT->isUnionType()) {
|
|
|
|
RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
|
2011-04-25 01:17:56 +08:00
|
|
|
CharUnits NumNonZeroBytes = CharUnits::Zero();
|
2010-12-03 02:29:00 +08:00
|
|
|
|
2010-12-03 06:52:04 +08:00
|
|
|
unsigned ILEElement = 0;
|
2014-03-09 04:12:42 +08:00
|
|
|
for (const auto *Field : SD->fields()) {
|
2010-12-03 06:52:04 +08:00
|
|
|
// We're done once we hit the flexible array member or run out of
|
|
|
|
// InitListExpr elements.
|
|
|
|
if (Field->getType()->isIncompleteArrayType() ||
|
|
|
|
ILEElement == ILE->getNumInits())
|
|
|
|
break;
|
|
|
|
if (Field->isUnnamedBitfield())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
const Expr *E = ILE->getInit(ILEElement++);
|
|
|
|
|
|
|
|
// Reference values are always non-null and have the width of a pointer.
|
|
|
|
if (Field->getType()->isReferenceType())
|
2011-04-25 01:17:56 +08:00
|
|
|
NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
|
2013-04-17 06:48:15 +08:00
|
|
|
CGF.getTarget().getPointerWidth(0));
|
2010-12-03 06:52:04 +08:00
|
|
|
else
|
|
|
|
NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
|
|
|
|
}
|
2010-12-03 02:29:00 +08:00
|
|
|
|
2010-12-03 06:52:04 +08:00
|
|
|
return NumNonZeroBytes;
|
2010-12-03 02:29:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-04-25 01:17:56 +08:00
|
|
|
CharUnits NumNonZeroBytes = CharUnits::Zero();
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
|
|
|
|
NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
|
|
|
|
return NumNonZeroBytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
|
|
|
|
/// zeros in it, emit a memset and avoid storing the individual zeros.
|
|
|
|
///
|
|
|
|
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
|
|
|
|
CodeGenFunction &CGF) {
|
|
|
|
// If the slot is already known to be zeroed, nothing to do. Don't mess with
|
|
|
|
// volatile stores.
|
2014-05-21 13:09:00 +08:00
|
|
|
if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == nullptr)
|
|
|
|
return;
|
2011-04-29 06:57:55 +08:00
|
|
|
|
|
|
|
// C++ objects with a user-declared constructor don't need zero'ing.
|
2012-11-02 06:30:59 +08:00
|
|
|
if (CGF.getLangOpts().CPlusPlus)
|
2011-04-29 06:57:55 +08:00
|
|
|
if (const RecordType *RT = CGF.getContext()
|
|
|
|
.getBaseElementType(E->getType())->getAs<RecordType>()) {
|
|
|
|
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
|
|
|
|
if (RD->hasUserDeclaredConstructor())
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
// If the type is 16-bytes or smaller, prefer individual stores over memset.
|
2011-04-25 01:25:32 +08:00
|
|
|
std::pair<CharUnits, CharUnits> TypeInfo =
|
|
|
|
CGF.getContext().getTypeInfoInChars(E->getType());
|
|
|
|
if (TypeInfo.first <= CharUnits::fromQuantity(16))
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
// Check to see if over 3/4 of the initializer are known to be zero. If so,
|
|
|
|
// we prefer to emit memset + individual stores for the rest.
|
2011-04-25 01:25:32 +08:00
|
|
|
CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
|
|
|
|
if (NumNonZeroBytes*4 > TypeInfo.first)
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
// Okay, it seems like a good idea to use an initial memset, emit the call.
|
2011-04-25 01:25:32 +08:00
|
|
|
llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
|
|
|
|
CharUnits Align = TypeInfo.second;
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
llvm::Value *Loc = Slot.getAddr();
|
|
|
|
|
2012-02-07 08:39:47 +08:00
|
|
|
Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
|
2011-04-25 01:25:32 +08:00
|
|
|
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
|
|
|
|
Align.getQuantity(), false);
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
// Tell the AggExprEmitter that the slot is known zero.
|
|
|
|
Slot.setZeroed();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2009-05-27 02:57:45 +08:00
|
|
|
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
|
|
|
|
/// type. The result is computed into DestPtr. Note that if DestPtr is null,
|
|
|
|
/// the value of the aggregate expression is not needed. If VolatileDest is
|
|
|
|
/// true, DestPtr cannot be 0.
|
2012-07-03 07:58:38 +08:00
|
|
|
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
|
2013-03-08 05:37:08 +08:00
|
|
|
assert(E && hasAggregateEvaluationKind(E->getType()) &&
|
2007-08-21 12:59:27 +08:00
|
|
|
"Invalid aggregate expression to emit");
|
2014-05-21 13:09:00 +08:00
|
|
|
assert((Slot.getAddr() != nullptr || Slot.isIgnored()) &&
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
"slot has bits but no address");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
// Optimize the slot if possible.
|
|
|
|
CheckAggExprForMemSetUse(Slot, E, *this);
|
|
|
|
|
2012-07-03 07:58:38 +08:00
|
|
|
AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
|
2007-08-21 12:59:27 +08:00
|
|
|
}
|
2008-09-10 04:49:46 +08:00
|
|
|
|
2010-02-06 03:38:31 +08:00
|
|
|
/// Evaluate an aggregate expression into a freshly created memory temporary
/// and return an lvalue referring to that storage.
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  llvm::Value *Storage = CreateMemTemp(E->getType());
  LValue Result = MakeAddrLValue(Storage, E->getType());
  // The temporary needs no destruction, GC barriers, or aliasing guarantees.
  EmitAggExpr(E, AggValueSlot::forLValue(Result, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return Result;
}
|
|
|
|
|
2012-03-30 01:37:10 +08:00
|
|
|
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
|
|
|
|
llvm::Value *SrcPtr, QualType Ty,
|
2012-07-03 07:58:38 +08:00
|
|
|
bool isVolatile,
|
2012-09-30 20:43:37 +08:00
|
|
|
CharUnits alignment,
|
|
|
|
bool isAssignment) {
|
2012-03-30 01:37:10 +08:00
|
|
|
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2012-11-02 06:30:59 +08:00
|
|
|
if (getLangOpts().CPlusPlus) {
|
2012-03-30 01:37:10 +08:00
|
|
|
if (const RecordType *RT = Ty->getAs<RecordType>()) {
|
|
|
|
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
|
|
|
|
assert((Record->hasTrivialCopyConstructor() ||
|
|
|
|
Record->hasTrivialCopyAssignment() ||
|
|
|
|
Record->hasTrivialMoveConstructor() ||
|
2015-04-30 03:26:57 +08:00
|
|
|
Record->hasTrivialMoveAssignment() ||
|
|
|
|
Record->isUnion()) &&
|
2012-11-16 08:53:38 +08:00
|
|
|
"Trying to aggregate-copy a type without a trivial copy/move "
|
2010-05-20 23:39:01 +08:00
|
|
|
"constructor or assignment operator");
|
2012-03-30 01:37:10 +08:00
|
|
|
// Ignore empty classes in C++.
|
|
|
|
if (Record->isEmpty())
|
2010-05-03 09:20:20 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-30 01:37:10 +08:00
|
|
|
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
|
|
|
|
// C99 6.5.16.1p3, which states "If the value being stored in an object is
|
|
|
|
// read from another object that overlaps in anyway the storage of the first
|
|
|
|
// object, then the overlap shall be exact and the two objects shall have
|
|
|
|
// qualified or unqualified versions of a compatible type."
|
|
|
|
//
|
|
|
|
// memcpy is not defined if the source and destination pointers are exactly
|
|
|
|
// equal, but other compilers do this optimization, and almost every memcpy
|
|
|
|
// implementation handles this case safely. If there is a libc that does not
|
|
|
|
// safely handle this, we can add a target hook.
|
|
|
|
|
2012-09-30 20:43:37 +08:00
|
|
|
// Get data size and alignment info for this aggregate. If this is an
|
|
|
|
// assignment don't copy the tail padding. Otherwise copying it is fine.
|
|
|
|
std::pair<CharUnits, CharUnits> TypeInfo;
|
|
|
|
if (isAssignment)
|
|
|
|
TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
|
|
|
|
else
|
|
|
|
TypeInfo = getContext().getTypeInfoInChars(Ty);
|
2012-03-30 01:37:10 +08:00
|
|
|
|
2012-07-03 07:58:38 +08:00
|
|
|
if (alignment.isZero())
|
|
|
|
alignment = TypeInfo.second;
|
2012-03-30 01:37:10 +08:00
|
|
|
|
2015-05-20 11:46:04 +08:00
|
|
|
llvm::Value *SizeVal = nullptr;
|
|
|
|
if (TypeInfo.first.isZero()) {
|
|
|
|
// But note that getTypeInfo returns 0 for a VLA.
|
|
|
|
if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
|
|
|
|
getContext().getAsArrayType(Ty))) {
|
|
|
|
QualType BaseEltTy;
|
|
|
|
SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
|
|
|
|
TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
|
|
|
|
std::pair<CharUnits, CharUnits> LastElementTypeInfo;
|
|
|
|
if (!isAssignment)
|
|
|
|
LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
|
|
|
|
assert(!TypeInfo.first.isZero());
|
|
|
|
SizeVal = Builder.CreateNUWMul(
|
|
|
|
SizeVal,
|
|
|
|
llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
|
|
|
|
if (!isAssignment) {
|
|
|
|
SizeVal = Builder.CreateNUWSub(
|
|
|
|
SizeVal,
|
|
|
|
llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
|
|
|
|
SizeVal = Builder.CreateNUWAdd(
|
|
|
|
SizeVal, llvm::ConstantInt::get(
|
|
|
|
SizeTy, LastElementTypeInfo.first.getQuantity()));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!SizeVal) {
|
|
|
|
SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
|
|
|
|
}
|
2012-03-30 01:37:10 +08:00
|
|
|
|
|
|
|
// FIXME: If we have a volatile struct, the optimizer can remove what might
|
|
|
|
// appear to be `extra' memory ops:
|
|
|
|
//
|
|
|
|
// volatile struct { int i; } a, b;
|
|
|
|
//
|
|
|
|
// int main() {
|
|
|
|
// a = b;
|
|
|
|
// a = b;
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// we need to use a different call here. We use isVolatile to indicate when
|
|
|
|
// either the source or the destination is volatile.
|
|
|
|
|
|
|
|
llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *DBP =
|
2011-02-08 16:22:06 +08:00
|
|
|
llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
|
2012-03-30 01:37:10 +08:00
|
|
|
DestPtr = Builder.CreateBitCast(DestPtr, DBP);
|
2010-04-04 11:10:52 +08:00
|
|
|
|
2012-03-30 01:37:10 +08:00
|
|
|
llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *SBP =
|
2011-02-08 16:22:06 +08:00
|
|
|
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
|
2012-03-30 01:37:10 +08:00
|
|
|
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
|
2010-04-04 11:10:52 +08:00
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
// Don't do any of the memmove_collectable tests if GC isn't set.
|
2012-03-11 15:00:24 +08:00
|
|
|
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
|
2011-06-16 07:02:42 +08:00
|
|
|
// fall through
|
2012-03-30 01:37:10 +08:00
|
|
|
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
|
|
|
|
RecordDecl *Record = RecordTy->getDecl();
|
|
|
|
if (Record->hasObjectMember()) {
|
|
|
|
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
|
|
|
|
SizeVal);
|
2010-06-16 06:44:06 +08:00
|
|
|
return;
|
|
|
|
}
|
2012-03-30 01:37:10 +08:00
|
|
|
} else if (Ty->isArrayType()) {
|
|
|
|
QualType BaseType = getContext().getBaseElementType(Ty);
|
|
|
|
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
|
|
|
|
if (RecordTy->getDecl()->hasObjectMember()) {
|
|
|
|
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
|
|
|
|
SizeVal);
|
2010-06-16 06:44:06 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2012-09-29 05:58:29 +08:00
|
|
|
|
|
|
|
// Determine the metadata to describe the position of any padding in this
|
|
|
|
// memcpy, as well as the TBAA tags for the members of the struct, in case
|
|
|
|
// the optimizer wishes to expand it in to scalar memory operations.
|
|
|
|
llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
|
2014-05-21 13:09:00 +08:00
|
|
|
|
2015-05-20 11:46:04 +08:00
|
|
|
Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, alignment.getQuantity(),
|
|
|
|
isVolatile, /*TBAATag=*/nullptr, TBAAStructTag);
|
2008-09-10 04:49:46 +08:00
|
|
|
}
|