2007-08-24 10:22:53 +08:00
|
|
|
//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
|
2007-08-11 04:13:28 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 03:59:25 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2007-08-11 04:13:28 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This contains code to emit Aggregate Expr nodes as LLVM code.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "CodeGenFunction.h"
|
2007-08-11 08:04:45 +08:00
|
|
|
#include "CodeGenModule.h"
|
2009-07-08 09:18:33 +08:00
|
|
|
#include "CGObjCRuntime.h"
|
2008-08-11 13:00:27 +08:00
|
|
|
#include "clang/AST/ASTContext.h"
|
2009-04-17 08:06:03 +08:00
|
|
|
#include "clang/AST/DeclCXX.h"
|
2012-02-17 16:42:25 +08:00
|
|
|
#include "clang/AST/DeclTemplate.h"
|
2008-08-11 13:00:27 +08:00
|
|
|
#include "clang/AST/StmtVisitor.h"
|
2007-08-11 08:04:45 +08:00
|
|
|
#include "llvm/Constants.h"
|
|
|
|
#include "llvm/Function.h"
|
2007-10-27 01:44:44 +08:00
|
|
|
#include "llvm/GlobalVariable.h"
|
2008-04-05 02:42:16 +08:00
|
|
|
#include "llvm/Intrinsics.h"
|
2007-08-11 04:13:28 +08:00
|
|
|
using namespace clang;
|
|
|
|
using namespace CodeGen;
|
2007-08-11 08:04:45 +08:00
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Aggregate Expression Emitter
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
2009-11-29 03:45:26 +08:00
|
|
|
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
|
2007-08-21 12:25:47 +08:00
|
|
|
CodeGenFunction &CGF;
|
2008-11-01 09:53:16 +08:00
|
|
|
CGBuilderTy &Builder;
|
2010-09-15 18:14:12 +08:00
|
|
|
AggValueSlot Dest;
|
2009-05-27 06:03:21 +08:00
|
|
|
bool IgnoreResult;
|
2010-05-22 09:48:05 +08:00
|
|
|
|
2011-08-26 07:04:34 +08:00
|
|
|
/// We want to use 'dest' as the return slot except under two
|
|
|
|
/// conditions:
|
|
|
|
/// - The destination slot requires garbage collection, so we
|
|
|
|
/// need to use the GC API.
|
|
|
|
/// - The destination slot is potentially aliased.
|
|
|
|
bool shouldUseDestForReturnSlot() const {
|
|
|
|
return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
|
|
|
|
}
|
|
|
|
|
2010-05-22 09:48:05 +08:00
|
|
|
ReturnValueSlot getReturnValueSlot() const {
|
2011-08-26 07:04:34 +08:00
|
|
|
if (!shouldUseDestForReturnSlot())
|
|
|
|
return ReturnValueSlot();
|
2010-05-23 06:13:32 +08:00
|
|
|
|
2010-09-15 18:14:12 +08:00
|
|
|
return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
|
|
|
|
}
|
|
|
|
|
|
|
|
AggValueSlot EnsureSlot(QualType T) {
|
|
|
|
if (!Dest.isIgnored()) return Dest;
|
|
|
|
return CGF.CreateAggTemp(T, "agg.tmp.ensured");
|
2010-05-22 09:48:05 +08:00
|
|
|
}
|
2010-05-23 06:13:32 +08:00
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
public:
|
2010-09-15 18:14:12 +08:00
|
|
|
AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
|
2010-09-16 08:20:07 +08:00
|
|
|
bool ignore)
|
2010-09-15 18:14:12 +08:00
|
|
|
: CGF(cgf), Builder(CGF.Builder), Dest(Dest),
|
2010-09-16 08:20:07 +08:00
|
|
|
IgnoreResult(ignore) {
|
2007-08-21 12:25:47 +08:00
|
|
|
}
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
// Utilities
|
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
|
|
|
|
/// represents a value lvalue, this method emits the address of the lvalue,
|
|
|
|
/// then loads the result into DestPtr.
|
|
|
|
void EmitAggLoadOfLValue(const Expr *E);
|
2008-05-20 01:51:16 +08:00
|
|
|
|
2009-05-24 04:28:01 +08:00
|
|
|
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
|
2009-05-27 06:03:21 +08:00
|
|
|
void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
|
2011-12-06 06:23:28 +08:00
|
|
|
void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
|
|
|
|
unsigned Alignment = 0);
|
2009-05-24 04:28:01 +08:00
|
|
|
|
2011-08-26 07:04:34 +08:00
|
|
|
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
|
2010-05-23 06:13:32 +08:00
|
|
|
|
2012-02-19 20:28:02 +08:00
|
|
|
void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
|
2012-02-17 16:42:25 +08:00
|
|
|
void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
|
|
|
|
QualType elementType, InitListExpr *E);
|
|
|
|
|
2011-08-26 04:40:09 +08:00
|
|
|
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
|
2011-09-14 01:21:33 +08:00
|
|
|
if (CGF.getLangOptions().getGC() && TypeRequiresGCollection(T))
|
2011-08-26 04:40:09 +08:00
|
|
|
return AggValueSlot::NeedsGCBarriers;
|
|
|
|
return AggValueSlot::DoesNotNeedGCBarriers;
|
|
|
|
}
|
|
|
|
|
2010-05-23 06:13:32 +08:00
|
|
|
bool TypeRequiresGCollection(QualType T);
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
// Visitor Methods
|
|
|
|
//===--------------------------------------------------------------------===//
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
void VisitStmt(Stmt *S) {
|
2008-08-16 08:56:44 +08:00
|
|
|
CGF.ErrorUnsupported(S, "aggregate expression");
|
2007-08-21 12:25:47 +08:00
|
|
|
}
|
|
|
|
void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
|
2011-04-15 08:35:48 +08:00
|
|
|
void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
|
|
|
|
Visit(GE->getResultExpr());
|
|
|
|
}
|
2009-01-27 17:03:41 +08:00
|
|
|
void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
|
2011-07-15 13:09:51 +08:00
|
|
|
void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
|
|
|
|
return Visit(E->getReplacement());
|
|
|
|
}
|
2007-08-21 12:25:47 +08:00
|
|
|
|
|
|
|
// l-values.
|
2012-03-10 11:05:10 +08:00
|
|
|
void emitDeclRef(ValueDecl *VD, Expr *refExpr) {
|
|
|
|
// For aggregates, we should always be able to emit the variable
|
|
|
|
// as an l-value unless it's a reference. This is due to the fact
|
|
|
|
// that we can't actually ever see a normal l2r conversion on an
|
|
|
|
// aggregate in C++, and in C there's no language standard
|
|
|
|
// actively preventing us from listing variables in the captures
|
|
|
|
// list of a block.
|
|
|
|
if (VD->getType()->isReferenceType()) {
|
|
|
|
if (CodeGenFunction::ConstantEmission result
|
|
|
|
= CGF.tryEmitAsConstant(VD, refExpr)) {
|
|
|
|
EmitFinalDestCopy(refExpr, result.getReferenceLValue(CGF, refExpr));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
EmitAggLoadOfLValue(refExpr);
|
|
|
|
}
|
|
|
|
void VisitDeclRefExpr(DeclRefExpr *E) { emitDeclRef(E->getDecl(), E); }
|
|
|
|
void VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
|
|
|
|
emitDeclRef(E->getDecl(), E);
|
|
|
|
}
|
|
|
|
|
2007-12-14 10:04:12 +08:00
|
|
|
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
|
|
|
|
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
|
2010-01-05 02:47:06 +08:00
|
|
|
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
|
2011-06-17 12:59:12 +08:00
|
|
|
void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
|
2007-12-14 10:04:12 +08:00
|
|
|
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
|
|
|
|
EmitAggLoadOfLValue(E);
|
|
|
|
}
|
2009-04-22 07:00:09 +08:00
|
|
|
void VisitPredefinedExpr(const PredefinedExpr *E) {
|
2009-09-09 23:08:12 +08:00
|
|
|
EmitAggLoadOfLValue(E);
|
2009-04-22 07:00:09 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
// Operators.
|
2009-08-08 07:22:37 +08:00
|
|
|
void VisitCastExpr(CastExpr *E);
|
2007-11-01 06:04:46 +08:00
|
|
|
void VisitCallExpr(const CallExpr *E);
|
2007-09-01 06:54:14 +08:00
|
|
|
void VisitStmtExpr(const StmtExpr *E);
|
2007-08-21 12:25:47 +08:00
|
|
|
void VisitBinaryOperator(const BinaryOperator *BO);
|
2009-10-23 06:57:31 +08:00
|
|
|
void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
|
2007-08-21 12:43:17 +08:00
|
|
|
void VisitBinAssign(const BinaryOperator *E);
|
2008-05-20 15:56:31 +08:00
|
|
|
void VisitBinComma(const BinaryOperator *E);
|
2007-08-21 12:25:47 +08:00
|
|
|
|
2008-06-25 01:04:18 +08:00
|
|
|
void VisitObjCMessageExpr(ObjCMessageExpr *E);
|
2008-08-23 18:51:21 +08:00
|
|
|
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
|
|
|
|
EmitAggLoadOfLValue(E);
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-02-17 18:25:35 +08:00
|
|
|
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
|
2009-07-09 02:33:14 +08:00
|
|
|
void VisitChooseExpr(const ChooseExpr *CE);
|
2007-10-27 01:44:44 +08:00
|
|
|
void VisitInitListExpr(InitListExpr *E);
|
2009-12-16 14:57:54 +08:00
|
|
|
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
|
2008-04-08 12:40:51 +08:00
|
|
|
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
|
|
|
|
Visit(DAE->getExpr());
|
|
|
|
}
|
2009-05-31 07:23:33 +08:00
|
|
|
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
|
2009-05-04 01:47:16 +08:00
|
|
|
void VisitCXXConstructExpr(const CXXConstructExpr *E);
|
2012-02-09 11:32:31 +08:00
|
|
|
void VisitLambdaExpr(LambdaExpr *E);
|
2010-12-06 16:20:24 +08:00
|
|
|
void VisitExprWithCleanups(ExprWithCleanups *E);
|
2010-07-08 14:14:04 +08:00
|
|
|
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
|
2009-11-18 08:40:12 +08:00
|
|
|
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
|
2011-06-22 01:03:29 +08:00
|
|
|
void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
|
2011-02-16 16:02:54 +08:00
|
|
|
void VisitOpaqueValueExpr(OpaqueValueExpr *E);
|
|
|
|
|
2011-11-06 17:01:30 +08:00
|
|
|
void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
|
|
|
|
if (E->isGLValue()) {
|
|
|
|
LValue LV = CGF.EmitPseudoObjectLValue(E);
|
|
|
|
return EmitFinalDestCopy(E, LV);
|
|
|
|
}
|
|
|
|
|
|
|
|
CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
|
|
|
|
}
|
|
|
|
|
2008-05-27 23:51:49 +08:00
|
|
|
void VisitVAArgExpr(VAArgExpr *E);
|
2008-04-05 02:42:16 +08:00
|
|
|
|
2011-06-16 12:16:24 +08:00
|
|
|
void EmitInitializationToLValue(Expr *E, LValue Address);
|
|
|
|
void EmitNullInitializationToLValue(LValue Address);
|
2007-08-21 12:25:47 +08:00
|
|
|
// case Expr::ChooseExprClass:
|
2009-12-10 03:24:08 +08:00
|
|
|
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
|
2011-10-11 10:20:01 +08:00
|
|
|
void VisitAtomicExpr(AtomicExpr *E) {
|
|
|
|
CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
|
|
|
|
}
|
2007-08-21 12:25:47 +08:00
|
|
|
};
|
|
|
|
} // end anonymous namespace.
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Utilities
|
|
|
|
//===----------------------------------------------------------------------===//
|
2007-08-21 12:25:47 +08:00
|
|
|
|
2007-08-11 08:04:45 +08:00
|
|
|
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
|
|
|
|
/// represents a value lvalue, this method emits the address of the lvalue,
|
|
|
|
/// then loads the result into DestPtr.
|
2007-08-21 12:25:47 +08:00
|
|
|
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
|
|
|
|
LValue LV = CGF.EmitLValue(E);
|
2009-05-24 04:28:01 +08:00
|
|
|
EmitFinalDestCopy(E, LV);
|
|
|
|
}
|
|
|
|
|
2010-05-23 06:13:32 +08:00
|
|
|
/// \brief True if the given aggregate type requires special GC API calls.
|
|
|
|
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
|
|
|
|
// Only record types have members that might require garbage collection.
|
|
|
|
const RecordType *RecordTy = T->getAs<RecordType>();
|
|
|
|
if (!RecordTy) return false;
|
|
|
|
|
|
|
|
// Don't mess with non-trivial C++ types.
|
|
|
|
RecordDecl *Record = RecordTy->getDecl();
|
|
|
|
if (isa<CXXRecordDecl>(Record) &&
|
|
|
|
(!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
|
|
|
|
!cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check whether the type has an object member.
|
|
|
|
return Record->hasObjectMember();
|
|
|
|
}
|
|
|
|
|
2011-08-26 07:04:34 +08:00
|
|
|
/// \brief Perform the final move to DestPtr if for some reason
|
|
|
|
/// getReturnValueSlot() didn't use it directly.
|
2010-05-23 06:13:32 +08:00
|
|
|
///
|
|
|
|
/// The idea is that you do something like this:
|
|
|
|
/// RValue Result = EmitSomething(..., getReturnValueSlot());
|
2011-08-26 07:04:34 +08:00
|
|
|
/// EmitMoveFromReturnSlot(E, Result);
|
|
|
|
///
|
|
|
|
/// If nothing interferes, this will cause the result to be emitted
|
|
|
|
/// directly into the return value slot. Otherwise, a final move
|
|
|
|
/// will be performed.
|
|
|
|
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
|
|
|
|
if (shouldUseDestForReturnSlot()) {
|
|
|
|
// Logically, Dest.getAddr() should equal Src.getAggregateAddr().
|
|
|
|
// The possibility of undef rvalues complicates that a lot,
|
|
|
|
// though, so we can't really assert.
|
|
|
|
return;
|
2010-06-16 06:44:06 +08:00
|
|
|
}
|
2011-08-26 07:04:34 +08:00
|
|
|
|
|
|
|
// Otherwise, do a final copy,
|
|
|
|
assert(Dest.getAddr() != Src.getAggregateAddr());
|
|
|
|
EmitFinalDestCopy(E, Src, /*Ignore*/ true);
|
2010-05-23 06:13:32 +08:00
|
|
|
}
|
|
|
|
|
2009-05-24 04:28:01 +08:00
|
|
|
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
|
2011-12-06 06:23:28 +08:00
|
|
|
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
|
|
|
|
unsigned Alignment) {
|
2009-05-24 04:28:01 +08:00
|
|
|
assert(Src.isAggregate() && "value must be aggregate value!");
|
|
|
|
|
2010-09-15 18:14:12 +08:00
|
|
|
// If Dest is ignored, then we're evaluating an aggregate expression
|
2010-08-25 10:50:31 +08:00
|
|
|
// in a context (like an expression statement) that doesn't care
|
|
|
|
// about the result. C says that an lvalue-to-rvalue conversion is
|
|
|
|
// performed in these cases; C++ says that it is not. In either
|
|
|
|
// case, we don't actually need to do anything unless the value is
|
|
|
|
// volatile.
|
2010-09-15 18:14:12 +08:00
|
|
|
if (Dest.isIgnored()) {
|
2010-08-25 10:50:31 +08:00
|
|
|
if (!Src.isVolatileQualified() ||
|
|
|
|
CGF.CGM.getLangOptions().CPlusPlus ||
|
|
|
|
(IgnoreResult && Ignore))
|
2009-05-24 06:01:27 +08:00
|
|
|
return;
|
2010-10-23 06:05:03 +08:00
|
|
|
|
2009-05-27 06:03:21 +08:00
|
|
|
// If the source is volatile, we must read from it; to do that, we need
|
|
|
|
// some place to put it.
|
2010-09-15 18:14:12 +08:00
|
|
|
Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
|
2009-05-24 06:01:27 +08:00
|
|
|
}
|
2007-08-11 08:04:45 +08:00
|
|
|
|
2010-09-16 11:13:23 +08:00
|
|
|
if (Dest.requiresGCollection()) {
|
2011-04-25 01:08:00 +08:00
|
|
|
CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
|
2011-04-25 01:08:00 +08:00
|
|
|
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
|
2009-09-01 03:33:16 +08:00
|
|
|
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
|
2010-09-15 18:14:12 +08:00
|
|
|
Dest.getAddr(),
|
|
|
|
Src.getAggregateAddr(),
|
|
|
|
SizeVal);
|
2009-09-01 03:33:16 +08:00
|
|
|
return;
|
|
|
|
}
|
2009-05-24 04:28:01 +08:00
|
|
|
// If the result of the assignment is used, copy the LHS there also.
|
|
|
|
// FIXME: Pass VolatileDest as well. I think we also need to merge volatile
|
|
|
|
// from the source as well, as we can't eliminate it if either operand
|
|
|
|
// is volatile, unless copy has volatile for both source and destination..
|
2010-09-15 18:14:12 +08:00
|
|
|
CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
|
2011-12-06 06:23:28 +08:00
|
|
|
Dest.isVolatile()|Src.isVolatileQualified(),
|
|
|
|
Alignment);
|
2009-05-24 04:28:01 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
|
2009-05-27 06:03:21 +08:00
|
|
|
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
|
2009-05-24 04:28:01 +08:00
|
|
|
assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
|
|
|
|
|
2011-12-06 06:23:28 +08:00
|
|
|
CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
|
|
|
|
EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
|
2007-08-11 08:04:45 +08:00
|
|
|
}
|
|
|
|
|
2012-02-17 16:42:25 +08:00
|
|
|
/// Extract the element type E from a std::initializer_list<E> record type.
static QualType GetStdInitializerListElementType(QualType T) {
  // Just assume that this is really std::initializer_list; the element
  // type is the first (and only) template argument.
  const RecordType *RT = T->castAs<RecordType>();
  ClassTemplateSpecializationDecl *Spec =
      cast<ClassTemplateSpecializationDecl>(RT->getDecl());
  return Spec->getTemplateArgs()[0].getAsType();
}
|
|
|
|
|
|
|
|
/// \brief Prepare cleanup for the temporary array.
|
|
|
|
static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
|
|
|
|
QualType arrayType,
|
|
|
|
llvm::Value *addr,
|
|
|
|
const InitListExpr *initList) {
|
|
|
|
QualType::DestructionKind dtorKind = arrayType.isDestructedType();
|
|
|
|
if (!dtorKind)
|
|
|
|
return; // Type doesn't need destroying.
|
|
|
|
if (dtorKind != QualType::DK_cxx_destructor) {
|
|
|
|
CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
|
|
|
|
CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
|
|
|
|
/*EHCleanup=*/true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Emit the initializer for a std::initializer_list initialized with a
|
|
|
|
/// real initializer list.
|
2012-02-19 20:28:02 +08:00
|
|
|
void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
|
|
|
|
InitListExpr *initList) {
|
2012-02-17 16:42:25 +08:00
|
|
|
// We emit an array containing the elements, then have the init list point
|
|
|
|
// at the array.
|
|
|
|
ASTContext &ctx = CGF.getContext();
|
|
|
|
unsigned numInits = initList->getNumInits();
|
|
|
|
QualType element = GetStdInitializerListElementType(initList->getType());
|
|
|
|
llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
|
|
|
|
QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal,0);
|
|
|
|
llvm::Type *LTy = CGF.ConvertTypeForMem(array);
|
|
|
|
llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
|
|
|
|
alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
|
|
|
|
alloc->setName(".initlist.");
|
|
|
|
|
|
|
|
EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);
|
|
|
|
|
|
|
|
// FIXME: The diagnostics are somewhat out of place here.
|
|
|
|
RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
|
|
|
|
RecordDecl::field_iterator field = record->field_begin();
|
|
|
|
if (field == record->field_end()) {
|
|
|
|
CGF.ErrorUnsupported(initList, "weird std::initializer_list");
|
2012-02-26 04:51:13 +08:00
|
|
|
return;
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
QualType elementPtr = ctx.getPointerType(element.withConst());
|
|
|
|
|
|
|
|
// Start pointer.
|
|
|
|
if (!ctx.hasSameType(field->getType(), elementPtr)) {
|
|
|
|
CGF.ErrorUnsupported(initList, "weird std::initializer_list");
|
2012-02-26 04:51:13 +08:00
|
|
|
return;
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
LValue start = CGF.EmitLValueForFieldInitialization(destPtr, *field, 0);
|
|
|
|
llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
|
|
|
|
CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
|
|
|
|
++field;
|
|
|
|
|
|
|
|
if (field == record->field_end()) {
|
|
|
|
CGF.ErrorUnsupported(initList, "weird std::initializer_list");
|
2012-02-26 04:51:13 +08:00
|
|
|
return;
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
LValue endOrLength = CGF.EmitLValueForFieldInitialization(destPtr, *field, 0);
|
|
|
|
if (ctx.hasSameType(field->getType(), elementPtr)) {
|
|
|
|
// End pointer.
|
|
|
|
llvm::Value *arrayEnd = Builder.CreateStructGEP(alloc,numInits, "arrayend");
|
|
|
|
CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
|
|
|
|
} else if(ctx.hasSameType(field->getType(), ctx.getSizeType())) {
|
|
|
|
// Length.
|
|
|
|
CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
|
|
|
|
} else {
|
|
|
|
CGF.ErrorUnsupported(initList, "weird std::initializer_list");
|
2012-02-26 04:51:13 +08:00
|
|
|
return;
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!Dest.isExternallyDestructed())
|
|
|
|
EmitStdInitializerListCleanup(CGF, array, alloc, initList);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Emit initialization of an array from an initializer list.
|
|
|
|
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
|
|
|
|
QualType elementType, InitListExpr *E) {
|
|
|
|
uint64_t NumInitElements = E->getNumInits();
|
|
|
|
|
|
|
|
uint64_t NumArrayElements = AType->getNumElements();
|
|
|
|
assert(NumInitElements <= NumArrayElements);
|
|
|
|
|
|
|
|
// DestPtr is an array*. Construct an elementType* by drilling
|
|
|
|
// down a level.
|
|
|
|
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
|
|
|
|
llvm::Value *indices[] = { zero, zero };
|
|
|
|
llvm::Value *begin =
|
|
|
|
Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
|
|
|
|
|
|
|
|
// Exception safety requires us to destroy all the
|
|
|
|
// already-constructed members if an initializer throws.
|
|
|
|
// For that, we'll need an EH cleanup.
|
|
|
|
QualType::DestructionKind dtorKind = elementType.isDestructedType();
|
|
|
|
llvm::AllocaInst *endOfInit = 0;
|
|
|
|
EHScopeStack::stable_iterator cleanup;
|
|
|
|
llvm::Instruction *cleanupDominator = 0;
|
|
|
|
if (CGF.needsEHCleanup(dtorKind)) {
|
|
|
|
// In principle we could tell the cleanup where we are more
|
|
|
|
// directly, but the control flow can get so varied here that it
|
|
|
|
// would actually be quite complex. Therefore we go through an
|
|
|
|
// alloca.
|
|
|
|
endOfInit = CGF.CreateTempAlloca(begin->getType(),
|
|
|
|
"arrayinit.endOfInit");
|
|
|
|
cleanupDominator = Builder.CreateStore(begin, endOfInit);
|
|
|
|
CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
|
|
|
|
CGF.getDestroyer(dtorKind));
|
|
|
|
cleanup = CGF.EHStack.stable_begin();
|
|
|
|
|
|
|
|
// Otherwise, remember that we didn't need a cleanup.
|
|
|
|
} else {
|
|
|
|
dtorKind = QualType::DK_none;
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
|
|
|
|
|
|
|
|
// The 'current element to initialize'. The invariants on this
|
|
|
|
// variable are complicated. Essentially, after each iteration of
|
|
|
|
// the loop, it points to the last initialized element, except
|
|
|
|
// that it points to the beginning of the array before any
|
|
|
|
// elements have been initialized.
|
|
|
|
llvm::Value *element = begin;
|
|
|
|
|
|
|
|
// Emit the explicit initializers.
|
|
|
|
for (uint64_t i = 0; i != NumInitElements; ++i) {
|
|
|
|
// Advance to the next element.
|
|
|
|
if (i > 0) {
|
|
|
|
element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
|
|
|
|
|
|
|
|
// Tell the cleanup that it needs to destroy up to this
|
|
|
|
// element. TODO: some of these stores can be trivially
|
|
|
|
// observed to be unnecessary.
|
|
|
|
if (endOfInit) Builder.CreateStore(element, endOfInit);
|
|
|
|
}
|
|
|
|
|
2012-02-19 20:28:02 +08:00
|
|
|
// If these are nested std::initializer_list inits, do them directly,
|
|
|
|
// because they are conceptually the same "location".
|
|
|
|
InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
|
|
|
|
if (initList && initList->initializesStdInitializerList()) {
|
|
|
|
EmitStdInitializerList(element, initList);
|
|
|
|
} else {
|
|
|
|
LValue elementLV = CGF.MakeAddrLValue(element, elementType);
|
|
|
|
EmitInitializationToLValue(E->getInit(i), elementLV);
|
|
|
|
}
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check whether there's a non-trivial array-fill expression.
|
|
|
|
// Note that this will be a CXXConstructExpr even if the element
|
|
|
|
// type is an array (or array of array, etc.) of class type.
|
|
|
|
Expr *filler = E->getArrayFiller();
|
|
|
|
bool hasTrivialFiller = true;
|
|
|
|
if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
|
|
|
|
assert(cons->getConstructor()->isDefaultConstructor());
|
|
|
|
hasTrivialFiller = cons->getConstructor()->isTrivial();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Any remaining elements need to be zero-initialized, possibly
|
|
|
|
// using the filler expression. We can skip this if the we're
|
|
|
|
// emitting to zeroed memory.
|
|
|
|
if (NumInitElements != NumArrayElements &&
|
|
|
|
!(Dest.isZeroed() && hasTrivialFiller &&
|
|
|
|
CGF.getTypes().isZeroInitializable(elementType))) {
|
|
|
|
|
|
|
|
// Use an actual loop. This is basically
|
|
|
|
// do { *array++ = filler; } while (array != end);
|
|
|
|
|
|
|
|
// Advance to the start of the rest of the array.
|
|
|
|
if (NumInitElements) {
|
|
|
|
element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
|
|
|
|
if (endOfInit) Builder.CreateStore(element, endOfInit);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the end of the array.
|
|
|
|
llvm::Value *end = Builder.CreateInBoundsGEP(begin,
|
|
|
|
llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
|
|
|
|
"arrayinit.end");
|
|
|
|
|
|
|
|
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
|
|
|
|
llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
|
|
|
|
|
|
|
|
// Jump into the body.
|
|
|
|
CGF.EmitBlock(bodyBB);
|
|
|
|
llvm::PHINode *currentElement =
|
|
|
|
Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
|
|
|
|
currentElement->addIncoming(element, entryBB);
|
|
|
|
|
|
|
|
// Emit the actual filler expression.
|
|
|
|
LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
|
|
|
|
if (filler)
|
|
|
|
EmitInitializationToLValue(filler, elementLV);
|
|
|
|
else
|
|
|
|
EmitNullInitializationToLValue(elementLV);
|
|
|
|
|
|
|
|
// Move on to the next element.
|
|
|
|
llvm::Value *nextElement =
|
|
|
|
Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
|
|
|
|
|
|
|
|
// Tell the EH cleanup that we finished with the last element.
|
|
|
|
if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
|
|
|
|
|
|
|
|
// Leave the loop if we're done.
|
|
|
|
llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
|
|
|
|
"arrayinit.done");
|
|
|
|
llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
|
|
|
|
Builder.CreateCondBr(done, endBB, bodyBB);
|
|
|
|
currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
|
|
|
|
|
|
|
|
CGF.EmitBlock(endBB);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Leave the partial-array cleanup if we entered one.
|
|
|
|
if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
|
|
|
|
}
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Visitor Methods
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2011-06-22 01:03:29 +08:00
|
|
|
void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  // A materialized temporary of aggregate type is emitted exactly like
  // the expression it wraps.
  Visit(E->GetTemporaryExpr());
}
|
|
|
|
|
2011-02-16 16:02:54 +08:00
|
|
|
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // The opaque value was bound earlier; copy from its mapped l-value.
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}
|
|
|
|
|
2011-06-17 12:59:12 +08:00
|
|
|
void
|
|
|
|
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
|
2011-06-18 00:37:20 +08:00
|
|
|
if (E->getType().isPODType(CGF.getContext())) {
|
|
|
|
// For a POD type, just emit a load of the lvalue + a copy, because our
|
|
|
|
// compound literal might alias the destination.
|
|
|
|
// FIXME: This is a band-aid; the real problem appears to be in our handling
|
|
|
|
// of assignments, where we store directly into the LHS without checking
|
|
|
|
// whether anything in the RHS aliases.
|
|
|
|
EmitAggLoadOfLValue(E);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-06-17 12:59:12 +08:00
|
|
|
AggValueSlot Slot = EnsureSlot(E->getType());
|
|
|
|
CGF.EmitAggExpr(E->getInitializer(), Slot);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-08-08 07:22:37 +08:00
|
|
|
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
|
2009-09-29 09:23:39 +08:00
|
|
|
switch (E->getCastKind()) {
|
2011-04-11 10:03:26 +08:00
|
|
|
case CK_Dynamic: {
|
2010-05-15 05:31:02 +08:00
|
|
|
assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
|
|
|
|
LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
|
|
|
|
// FIXME: Do we also need to handle property references here?
|
|
|
|
if (LV.isSimple())
|
|
|
|
CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
|
|
|
|
else
|
|
|
|
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
|
|
|
|
|
2010-09-15 18:14:12 +08:00
|
|
|
if (!Dest.isIgnored())
|
|
|
|
CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
|
2010-05-15 05:31:02 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-08-25 19:45:40 +08:00
|
|
|
case CK_ToUnion: {
|
2011-04-13 06:02:02 +08:00
|
|
|
if (Dest.isIgnored()) break;
|
|
|
|
|
2009-08-08 07:22:37 +08:00
|
|
|
// GCC union extension
|
2010-08-21 11:15:20 +08:00
|
|
|
QualType Ty = E->getSubExpr()->getType();
|
|
|
|
QualType PtrTy = CGF.getContext().getPointerType(Ty);
|
2010-09-15 18:14:12 +08:00
|
|
|
llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
|
2009-06-04 04:45:06 +08:00
|
|
|
CGF.ConvertType(PtrTy));
|
2011-06-16 12:16:24 +08:00
|
|
|
EmitInitializationToLValue(E->getSubExpr(),
|
|
|
|
CGF.MakeAddrLValue(CastPtr, Ty));
|
2009-09-29 09:23:39 +08:00
|
|
|
break;
|
2009-01-16 04:14:33 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-08-25 19:45:40 +08:00
|
|
|
case CK_DerivedToBase:
|
|
|
|
case CK_BaseToDerived:
|
|
|
|
case CK_UncheckedDerivedToBase: {
|
2011-09-23 13:06:16 +08:00
|
|
|
llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
|
2010-05-22 13:17:18 +08:00
|
|
|
"should have been unpacked before we got here");
|
|
|
|
}
|
|
|
|
|
2010-12-04 11:47:34 +08:00
|
|
|
case CK_LValueToRValue: // hope for downstream optimization
|
2010-08-25 19:45:40 +08:00
|
|
|
case CK_NoOp:
|
2012-01-17 01:27:18 +08:00
|
|
|
case CK_AtomicToNonAtomic:
|
|
|
|
case CK_NonAtomicToAtomic:
|
2010-08-25 19:45:40 +08:00
|
|
|
case CK_UserDefinedConversion:
|
|
|
|
case CK_ConstructorConversion:
|
2009-09-29 09:23:39 +08:00
|
|
|
assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
|
|
|
|
E->getType()) &&
|
|
|
|
"Implicit cast types must be compatible");
|
|
|
|
Visit(E->getSubExpr());
|
|
|
|
break;
|
2010-12-01 12:43:34 +08:00
|
|
|
|
2010-08-25 19:45:40 +08:00
|
|
|
case CK_LValueBitCast:
|
2010-12-01 12:43:34 +08:00
|
|
|
llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
|
2011-04-07 16:22:57 +08:00
|
|
|
|
2010-12-01 12:43:34 +08:00
|
|
|
case CK_Dependent:
|
|
|
|
case CK_BitCast:
|
|
|
|
case CK_ArrayToPointerDecay:
|
|
|
|
case CK_FunctionToPointerDecay:
|
|
|
|
case CK_NullToPointer:
|
|
|
|
case CK_NullToMemberPointer:
|
|
|
|
case CK_BaseToDerivedMemberPointer:
|
|
|
|
case CK_DerivedToBaseMemberPointer:
|
|
|
|
case CK_MemberPointerToBoolean:
|
2012-02-15 09:22:51 +08:00
|
|
|
case CK_ReinterpretMemberPointer:
|
2010-12-01 12:43:34 +08:00
|
|
|
case CK_IntegralToPointer:
|
|
|
|
case CK_PointerToIntegral:
|
|
|
|
case CK_PointerToBoolean:
|
|
|
|
case CK_ToVoid:
|
|
|
|
case CK_VectorSplat:
|
|
|
|
case CK_IntegralCast:
|
|
|
|
case CK_IntegralToBoolean:
|
|
|
|
case CK_IntegralToFloating:
|
|
|
|
case CK_FloatingToIntegral:
|
|
|
|
case CK_FloatingToBoolean:
|
|
|
|
case CK_FloatingCast:
|
2011-09-09 13:25:32 +08:00
|
|
|
case CK_CPointerToObjCPointerCast:
|
|
|
|
case CK_BlockPointerToObjCPointerCast:
|
2010-12-01 12:43:34 +08:00
|
|
|
case CK_AnyPointerToBlockPointerCast:
|
|
|
|
case CK_ObjCObjectLValueCast:
|
|
|
|
case CK_FloatingRealToComplex:
|
|
|
|
case CK_FloatingComplexToReal:
|
|
|
|
case CK_FloatingComplexToBoolean:
|
|
|
|
case CK_FloatingComplexCast:
|
|
|
|
case CK_FloatingComplexToIntegralComplex:
|
|
|
|
case CK_IntegralRealToComplex:
|
|
|
|
case CK_IntegralComplexToReal:
|
|
|
|
case CK_IntegralComplexToBoolean:
|
|
|
|
case CK_IntegralComplexCast:
|
|
|
|
case CK_IntegralComplexToFloatingComplex:
|
2011-09-10 14:18:15 +08:00
|
|
|
case CK_ARCProduceObject:
|
|
|
|
case CK_ARCConsumeObject:
|
|
|
|
case CK_ARCReclaimReturnedObject:
|
|
|
|
case CK_ARCExtendBlockObject:
|
2012-02-22 13:02:47 +08:00
|
|
|
case CK_CopyAndAutoreleaseBlockObject:
|
2010-12-01 12:43:34 +08:00
|
|
|
llvm_unreachable("cast kind invalid for aggregate types");
|
2009-09-29 09:23:39 +08:00
|
|
|
}
|
2008-01-14 14:28:57 +08:00
|
|
|
}
|
|
|
|
|
2008-07-27 06:37:01 +08:00
|
|
|
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
|
2009-05-28 00:45:02 +08:00
|
|
|
if (E->getCallReturnType()->isReferenceType()) {
|
|
|
|
EmitAggLoadOfLValue(E);
|
|
|
|
return;
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-05-23 06:13:32 +08:00
|
|
|
RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
|
2011-08-26 07:04:34 +08:00
|
|
|
EmitMoveFromReturnSlot(E, RV);
|
2008-01-31 13:38:29 +08:00
|
|
|
}
|
2008-07-27 06:37:01 +08:00
|
|
|
|
|
|
|
/// Emit an Objective-C message send whose result is an aggregate.
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  // Emit the send into the return slot, then move the result into the
  // final destination.
  RValue result = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, result);
}
|
2008-01-31 13:38:29 +08:00
|
|
|
|
2008-07-27 06:37:01 +08:00
|
|
|
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
|
2010-12-05 10:00:02 +08:00
|
|
|
CGF.EmitIgnoredExpr(E->getLHS());
|
2010-09-15 18:14:12 +08:00
|
|
|
Visit(E->getRHS());
|
2008-05-20 15:56:31 +08:00
|
|
|
}
|
|
|
|
|
2007-09-01 06:54:14 +08:00
|
|
|
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
|
2011-01-26 12:00:11 +08:00
|
|
|
CodeGenFunction::StmtExprEvaluation eval(CGF);
|
2010-09-15 18:14:12 +08:00
|
|
|
CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
|
2007-09-01 06:54:14 +08:00
|
|
|
}
|
|
|
|
|
2007-08-21 12:25:47 +08:00
|
|
|
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
|
2010-08-25 19:45:40 +08:00
|
|
|
if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
|
2009-10-23 06:57:31 +08:00
|
|
|
VisitPointerToDataMemberBinaryOperator(E);
|
|
|
|
else
|
|
|
|
CGF.ErrorUnsupported(E, "aggregate binary expression");
|
|
|
|
}
|
|
|
|
|
|
|
|
void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
|
|
|
|
const BinaryOperator *E) {
|
|
|
|
LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
|
|
|
|
EmitFinalDestCopy(E, LV);
|
2007-08-21 12:59:27 +08:00
|
|
|
}
|
|
|
|
|
2007-08-21 12:43:17 +08:00
|
|
|
/// Emit an aggregate assignment "LHS = RHS", then copy the stored value on
/// to the final destination slot (if any) so the assignment's own value is
/// available to the enclosing expression.
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // Special case: assignment into a __block variable whose RHS has side
  // effects.  The usual path below evaluates the LHS lvalue first, which
  // would be stale if the RHS copies the block to the heap.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When __block variable on LHS, the RHS must be evaluated first
        // as it may change the 'forwarding' field via call to Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        // Repoint Dest at the (freshly computed) LHS and copy the RHS
        // value both into it and on to any outer destination.
        Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                       needsGC(E->getLHS()->getType()),
                                       AggValueSlot::IsAliased);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
  // Forward the assigned value to the real destination slot as well.
  EmitFinalDestCopy(E, LHS, true);
}
|
|
|
|
|
2011-02-17 18:25:35 +08:00
|
|
|
/// Emit a (possibly GNU ?:-style) conditional operator of aggregate type.
/// Both arms are emitted into the same destination slot, with normal
/// conditional-evaluation bookkeeping so cleanups are branch-local.
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  // Track that only one arm will actually run, and branch on the condition.
  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  // Emit the true arm into the destination slot.
  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  // Emit the false arm into the (same) destination slot.
  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}
|
2007-08-21 12:59:27 +08:00
|
|
|
|
2009-07-09 02:33:14 +08:00
|
|
|
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
|
|
|
|
Visit(CE->getChosenSubExpr(CGF.getContext()));
|
|
|
|
}
|
|
|
|
|
2008-05-27 23:51:49 +08:00
|
|
|
/// Emit va_arg of aggregate type: pull the next argument's address out of
/// the va_list and copy it into the destination.
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *VAList = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(VAList, VE->getType());

  // A null pointer means the target could not handle va_arg for this type.
  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
|
|
|
|
|
2009-05-31 07:23:33 +08:00
|
|
|
/// Emit a C++ temporary binding: emit the sub-expression into a slot, then
/// register the temporary's destructor unless something else already owns
/// the slot's destruction.
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool hadExternalDtor = Dest.isExternallyDestructed();
  Dest = EnsureSlot(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!hadExternalDtor)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}
|
|
|
|
|
2009-04-17 08:06:03 +08:00
|
|
|
void
|
2009-05-04 01:47:16 +08:00
|
|
|
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
|
2010-09-15 18:14:12 +08:00
|
|
|
AggValueSlot Slot = EnsureSlot(E->getType());
|
|
|
|
CGF.EmitCXXConstructExpr(E, Slot);
|
2009-05-19 12:48:36 +08:00
|
|
|
}
|
|
|
|
|
2012-02-09 11:32:31 +08:00
|
|
|
void
|
|
|
|
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
|
|
|
|
AggValueSlot Slot = EnsureSlot(E->getType());
|
|
|
|
CGF.EmitLambdaExpr(E, Slot);
|
|
|
|
}
|
|
|
|
|
2010-12-06 16:20:24 +08:00
|
|
|
/// Emit a full-expression wrapper: enter the full-expression, emit the
/// wrapped expression, and run any cleanups it registered when the scope
/// is torn down.
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope scope(CGF);
  Visit(E->getSubExpr());
}
|
|
|
|
|
2010-07-08 14:14:04 +08:00
|
|
|
/// Emit T() value-initialization of an aggregate: zero the destination.
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType Ty = E->getType();
  AggValueSlot Slot = EnsureSlot(Ty);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), Ty));
}
|
|
|
|
|
|
|
|
/// Emit an implicit value-initialization (e.g. a trailing initializer-list
/// element): zero the destination.
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType Ty = E->getType();
  AggValueSlot Slot = EnsureSlot(Ty);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), Ty));
}
|
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
/// isSimpleZero - If emitting this value will obviously just cause a store of
|
|
|
|
/// zero to memory, return true. This can return false if uncertain, so it just
|
|
|
|
/// handles simple cases.
|
|
|
|
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
|
2011-04-15 08:35:48 +08:00
|
|
|
E = E->IgnoreParens();
|
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
// 0
|
|
|
|
if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
|
|
|
|
return IL->getValue() == 0;
|
|
|
|
// +0.0
|
|
|
|
if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
|
|
|
|
return FL->getValue().isPosZero();
|
|
|
|
// int()
|
|
|
|
if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
|
|
|
|
CGF.getTypes().isZeroInitializable(E->getType()))
|
|
|
|
return true;
|
|
|
|
// (int*)0 - Null pointer expressions.
|
|
|
|
if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
|
|
|
|
return ICE->getCastKind() == CK_NullToPointer;
|
|
|
|
// '\0'
|
|
|
|
if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
|
|
|
|
return CL->getValue() == 0;
|
|
|
|
|
|
|
|
// Otherwise, hard case: conservatively return false.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-04 01:33:16 +08:00
|
|
|
/// Emit the initializer E into the lvalue LV, dispatching on LV's type:
/// references are bound, complex values stored, aggregates recursively
/// emitted, and scalars stored directly.  The branch order matters: the
/// zero/implicit-init fast paths must be checked before the type tests.
void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    // Implicit value-init: emit a null/zero value for the field.
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    // Bind the reference and store the resulting pointer.
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    // Recurse for nested aggregates, propagating the zeroed-ness of the
    // destination so redundant zero stores can be skipped.
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    // Non-simple lvalue (e.g. bitfield): go through the generic store path.
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}
|
2008-02-19 06:44:02 +08:00
|
|
|
|
2011-06-16 12:16:24 +08:00
|
|
|
/// Store a null/zero value of the lvalue's type into it, skipping the work
/// entirely when the destination is known to be pre-zeroed.
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasAggregateLLVMType(type)) {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
    return;
  }

  // For non-aggregates, we can store zero.
  llvm::Value *zero = llvm::Constant::getNullValue(CGF.ConvertType(type));
  // Note that the following is not equivalent to
  // EmitStoreThroughBitfieldLValue for ARC types.
  if (lv.isBitField()) {
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(zero), lv);
  } else {
    assert(lv.isSimple());
    CGF.EmitStoreOfScalar(zero, lv, /* isInitialization */ true);
  }
}
|
2008-02-19 06:44:02 +08:00
|
|
|
|
2008-04-05 02:42:16 +08:00
|
|
|
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
|
2008-12-02 09:17:45 +08:00
|
|
|
#if 0
|
2009-12-04 09:30:56 +08:00
|
|
|
// FIXME: Assess perf here? Figure out what cases are worth optimizing here
|
|
|
|
// (Length of globals? Chunks of zeroed-out space?).
|
2008-12-02 09:17:45 +08:00
|
|
|
//
|
2009-05-16 15:57:57 +08:00
|
|
|
// If we can, prefer a copy from a global; this is a lot less code for long
|
|
|
|
// globals, and it's easier for the current optimizers to analyze.
|
2009-12-04 09:30:56 +08:00
|
|
|
if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
|
2008-11-30 10:11:09 +08:00
|
|
|
llvm::GlobalVariable* GV =
|
2009-12-04 09:30:56 +08:00
|
|
|
new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
|
|
|
|
llvm::GlobalValue::InternalLinkage, C, "");
|
2010-08-21 11:15:20 +08:00
|
|
|
EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
|
2008-11-30 10:11:09 +08:00
|
|
|
return;
|
|
|
|
}
|
2008-12-02 09:17:45 +08:00
|
|
|
#endif
|
2010-09-06 08:11:41 +08:00
|
|
|
if (E->hadArrayRangeDesignator())
|
2009-01-30 03:42:23 +08:00
|
|
|
CGF.ErrorUnsupported(E, "GNU array range designator extension");
|
|
|
|
|
2012-02-17 16:42:25 +08:00
|
|
|
if (E->initializesStdInitializerList()) {
|
2012-02-19 20:28:02 +08:00
|
|
|
EmitStdInitializerList(Dest.getAddr(), E);
|
2012-02-17 16:42:25 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-02-29 08:00:28 +08:00
|
|
|
llvm::Value *DestPtr = EnsureSlot(E->getType()).getAddr();
|
2010-09-15 18:14:12 +08:00
|
|
|
|
2008-04-05 02:42:16 +08:00
|
|
|
// Handle initialization of an array.
|
|
|
|
if (E->getType()->isArrayType()) {
|
2008-07-27 06:37:01 +08:00
|
|
|
if (E->getNumInits() > 0) {
|
|
|
|
QualType T1 = E->getType();
|
|
|
|
QualType T2 = E->getInit(0)->getType();
|
2009-05-29 07:04:00 +08:00
|
|
|
if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
|
2008-07-27 06:37:01 +08:00
|
|
|
EmitAggLoadOfLValue(E->getInit(0));
|
|
|
|
return;
|
|
|
|
}
|
2008-05-20 01:51:16 +08:00
|
|
|
}
|
|
|
|
|
2012-02-23 10:25:10 +08:00
|
|
|
QualType elementType =
|
|
|
|
CGF.getContext().getAsArrayType(E->getType())->getElementType();
|
2011-07-09 09:37:26 +08:00
|
|
|
|
2012-02-17 16:42:25 +08:00
|
|
|
llvm::PointerType *APType =
|
|
|
|
cast<llvm::PointerType>(DestPtr->getType());
|
|
|
|
llvm::ArrayType *AType =
|
|
|
|
cast<llvm::ArrayType>(APType->getElementType());
|
2011-07-09 09:37:26 +08:00
|
|
|
|
2012-02-17 16:42:25 +08:00
|
|
|
EmitArrayInit(DestPtr, AType, elementType, E);
|
2008-02-19 06:44:02 +08:00
|
|
|
return;
|
2008-04-05 02:42:16 +08:00
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-04-05 02:42:16 +08:00
|
|
|
assert(E->getType()->isRecordType() && "Only support structs/unions here!");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-04-05 02:42:16 +08:00
|
|
|
// Do struct initialization; this code just sets each individual member
|
|
|
|
// to the approprate value. This makes bitfield support automatic;
|
|
|
|
// the disadvantage is that the generated code is more difficult for
|
|
|
|
// the optimizer, especially with bitfields.
|
|
|
|
unsigned NumInitElements = E->getNumInits();
|
2011-07-12 03:35:02 +08:00
|
|
|
RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
|
2010-09-06 08:13:11 +08:00
|
|
|
|
2011-07-12 03:35:02 +08:00
|
|
|
if (record->isUnion()) {
|
2009-01-30 00:53:55 +08:00
|
|
|
// Only initialize one field of a union. The field itself is
|
|
|
|
// specified by the initializer list.
|
|
|
|
if (!E->getInitializedFieldInUnion()) {
|
|
|
|
// Empty union; we have nothing to do.
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-01-30 00:53:55 +08:00
|
|
|
#ifndef NDEBUG
|
|
|
|
// Make sure that it's really an empty and not a failure of
|
|
|
|
// semantic analysis.
|
2011-07-12 03:35:02 +08:00
|
|
|
for (RecordDecl::field_iterator Field = record->field_begin(),
|
|
|
|
FieldEnd = record->field_end();
|
2009-01-30 00:53:55 +08:00
|
|
|
Field != FieldEnd; ++Field)
|
|
|
|
assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
|
|
|
|
#endif
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// FIXME: volatility
|
|
|
|
FieldDecl *Field = E->getInitializedFieldInUnion();
|
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
|
2009-01-30 00:53:55 +08:00
|
|
|
if (NumInitElements) {
|
|
|
|
// Store the initializer into the field
|
2011-06-16 12:16:24 +08:00
|
|
|
EmitInitializationToLValue(E->getInit(0), FieldLoc);
|
2009-01-30 00:53:55 +08:00
|
|
|
} else {
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
// Default-initialize to null.
|
2011-06-16 12:16:24 +08:00
|
|
|
EmitNullInitializationToLValue(FieldLoc);
|
2009-01-30 00:53:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-07-12 03:35:02 +08:00
|
|
|
// We'll need to enter cleanup scopes in case any of the member
|
|
|
|
// initializers throw an exception.
|
2011-07-23 18:55:15 +08:00
|
|
|
SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
|
2011-11-10 18:43:54 +08:00
|
|
|
llvm::Instruction *cleanupDominator = 0;
|
2011-07-12 03:35:02 +08:00
|
|
|
|
2008-04-05 02:42:16 +08:00
|
|
|
// Here we iterate over the fields; this makes it simpler to both
|
|
|
|
// default-initialize fields and skip over unnamed fields.
|
2011-07-12 03:35:02 +08:00
|
|
|
unsigned curInitIndex = 0;
|
|
|
|
for (RecordDecl::field_iterator field = record->field_begin(),
|
|
|
|
fieldEnd = record->field_end();
|
|
|
|
field != fieldEnd; ++field) {
|
|
|
|
// We're done once we hit the flexible array member.
|
|
|
|
if (field->getType()->isIncompleteArrayType())
|
2008-12-12 00:49:14 +08:00
|
|
|
break;
|
|
|
|
|
2011-07-12 03:35:02 +08:00
|
|
|
// Always skip anonymous bitfields.
|
|
|
|
if (field->isUnnamedBitfield())
|
2008-04-05 02:42:16 +08:00
|
|
|
continue;
|
2009-01-29 07:36:17 +08:00
|
|
|
|
2011-07-12 03:35:02 +08:00
|
|
|
// We're done if we reach the end of the explicit initializers, we
|
|
|
|
// have a zeroed object, and the rest of the fields are
|
|
|
|
// zero-initializable.
|
|
|
|
if (curInitIndex == NumInitElements && Dest.isZeroed() &&
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
CGF.getTypes().isZeroInitializable(E->getType()))
|
|
|
|
break;
|
|
|
|
|
2008-06-14 07:01:12 +08:00
|
|
|
// FIXME: volatility
|
2011-07-12 03:35:02 +08:00
|
|
|
LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
|
2009-05-28 03:54:11 +08:00
|
|
|
// We never generate write-barries for initialized fields.
|
2011-07-12 03:35:02 +08:00
|
|
|
LV.setNonGC(true);
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
2011-07-12 03:35:02 +08:00
|
|
|
if (curInitIndex < NumInitElements) {
|
2010-03-09 05:08:07 +08:00
|
|
|
// Store the initializer into the field.
|
2011-07-12 03:35:02 +08:00
|
|
|
EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
|
2008-04-05 02:42:16 +08:00
|
|
|
} else {
|
|
|
|
// We're out of initalizers; default-initialize to null
|
2011-07-12 03:35:02 +08:00
|
|
|
EmitNullInitializationToLValue(LV);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Push a destructor if necessary.
|
|
|
|
// FIXME: if we have an array of structures, all explicitly
|
|
|
|
// initialized, we can end up pushing a linear number of cleanups.
|
|
|
|
bool pushedCleanup = false;
|
|
|
|
if (QualType::DestructionKind dtorKind
|
|
|
|
= field->getType().isDestructedType()) {
|
|
|
|
assert(LV.isSimple());
|
|
|
|
if (CGF.needsEHCleanup(dtorKind)) {
|
2011-11-10 18:43:54 +08:00
|
|
|
if (!cleanupDominator)
|
|
|
|
cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
|
|
|
|
|
2011-07-12 03:35:02 +08:00
|
|
|
CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
|
|
|
|
CGF.getDestroyer(dtorKind), false);
|
|
|
|
cleanups.push_back(CGF.EHStack.stable_begin());
|
|
|
|
pushedCleanup = true;
|
|
|
|
}
|
2008-04-05 02:42:16 +08:00
|
|
|
}
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
// If the GEP didn't get used because of a dead zero init or something
|
|
|
|
// else, clean it up for -O0 builds and general tidiness.
|
2011-07-12 03:35:02 +08:00
|
|
|
if (!pushedCleanup && LV.isSimple())
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
if (llvm::GetElementPtrInst *GEP =
|
2011-07-12 03:35:02 +08:00
|
|
|
dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
if (GEP->use_empty())
|
|
|
|
GEP->eraseFromParent();
|
2008-02-20 03:27:31 +08:00
|
|
|
}
|
2011-07-12 03:35:02 +08:00
|
|
|
|
|
|
|
// Deactivate all the partial cleanups in reverse order, which
|
|
|
|
// generally means popping them.
|
|
|
|
for (unsigned i = cleanups.size(); i != 0; --i)
|
2011-11-10 18:43:54 +08:00
|
|
|
CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
|
|
|
|
|
|
|
|
// Destroy the placeholder if we made one.
|
|
|
|
if (cleanupDominator)
|
|
|
|
cleanupDominator->eraseFromParent();
|
2007-10-27 01:44:44 +08:00
|
|
|
}
|
|
|
|
|
2007-08-21 12:59:27 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Entry Points into this File
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
|
|
|
|
/// non-zero bytes that will be stored when outputting the initializer for the
|
|
|
|
/// specified initializer expression.
|
2011-04-25 01:17:56 +08:00
|
|
|
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
|
2011-04-15 08:35:48 +08:00
|
|
|
E = E->IgnoreParens();
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
// 0 and 0.0 won't require any non-zero stores!
|
2011-04-25 01:17:56 +08:00
|
|
|
if (isSimpleZero(E, CGF)) return CharUnits::Zero();
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
// If this is an initlist expr, sum up the size of sizes of the (present)
|
|
|
|
// elements. If this is something weird, assume the whole thing is non-zero.
|
|
|
|
const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
|
|
|
|
if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
|
2011-04-25 01:17:56 +08:00
|
|
|
return CGF.getContext().getTypeSizeInChars(E->getType());
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
2010-12-03 02:29:00 +08:00
|
|
|
// InitListExprs for structs have to be handled carefully. If there are
|
|
|
|
// reference members, we need to consider the size of the reference, not the
|
|
|
|
// referencee. InitListExprs for unions and arrays can't have references.
|
2010-12-03 06:52:04 +08:00
|
|
|
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
|
|
|
|
if (!RT->isUnionType()) {
|
|
|
|
RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
|
2011-04-25 01:17:56 +08:00
|
|
|
CharUnits NumNonZeroBytes = CharUnits::Zero();
|
2010-12-03 02:29:00 +08:00
|
|
|
|
2010-12-03 06:52:04 +08:00
|
|
|
unsigned ILEElement = 0;
|
|
|
|
for (RecordDecl::field_iterator Field = SD->field_begin(),
|
|
|
|
FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
|
|
|
|
// We're done once we hit the flexible array member or run out of
|
|
|
|
// InitListExpr elements.
|
|
|
|
if (Field->getType()->isIncompleteArrayType() ||
|
|
|
|
ILEElement == ILE->getNumInits())
|
|
|
|
break;
|
|
|
|
if (Field->isUnnamedBitfield())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
const Expr *E = ILE->getInit(ILEElement++);
|
|
|
|
|
|
|
|
// Reference values are always non-null and have the width of a pointer.
|
|
|
|
if (Field->getType()->isReferenceType())
|
2011-04-25 01:17:56 +08:00
|
|
|
NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
|
2011-09-02 08:18:52 +08:00
|
|
|
CGF.getContext().getTargetInfo().getPointerWidth(0));
|
2010-12-03 06:52:04 +08:00
|
|
|
else
|
|
|
|
NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
|
|
|
|
}
|
2010-12-03 02:29:00 +08:00
|
|
|
|
2010-12-03 06:52:04 +08:00
|
|
|
return NumNonZeroBytes;
|
2010-12-03 02:29:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-04-25 01:17:56 +08:00
|
|
|
CharUnits NumNonZeroBytes = CharUnits::Zero();
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
|
|
|
|
NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
|
|
|
|
return NumNonZeroBytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
|
|
|
|
/// zeros in it, emit a memset and avoid storing the individual zeros.
|
|
|
|
///
|
|
|
|
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
|
|
|
|
CodeGenFunction &CGF) {
|
|
|
|
// If the slot is already known to be zeroed, nothing to do. Don't mess with
|
|
|
|
// volatile stores.
|
|
|
|
if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
|
2011-04-29 06:57:55 +08:00
|
|
|
|
|
|
|
// C++ objects with a user-declared constructor don't need zero'ing.
|
|
|
|
if (CGF.getContext().getLangOptions().CPlusPlus)
|
|
|
|
if (const RecordType *RT = CGF.getContext()
|
|
|
|
.getBaseElementType(E->getType())->getAs<RecordType>()) {
|
|
|
|
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
|
|
|
|
if (RD->hasUserDeclaredConstructor())
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
// If the type is 16-bytes or smaller, prefer individual stores over memset.
|
2011-04-25 01:25:32 +08:00
|
|
|
std::pair<CharUnits, CharUnits> TypeInfo =
|
|
|
|
CGF.getContext().getTypeInfoInChars(E->getType());
|
|
|
|
if (TypeInfo.first <= CharUnits::fromQuantity(16))
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
// Check to see if over 3/4 of the initializer are known to be zero. If so,
|
|
|
|
// we prefer to emit memset + individual stores for the rest.
|
2011-04-25 01:25:32 +08:00
|
|
|
CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
|
|
|
|
if (NumNonZeroBytes*4 > TypeInfo.first)
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
// Okay, it seems like a good idea to use an initial memset, emit the call.
|
2011-04-25 01:25:32 +08:00
|
|
|
llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
|
|
|
|
CharUnits Align = TypeInfo.second;
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
llvm::Value *Loc = Slot.getAddr();
|
|
|
|
|
2012-02-07 08:39:47 +08:00
|
|
|
Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
|
2011-04-25 01:25:32 +08:00
|
|
|
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
|
|
|
|
Align.getQuantity(), false);
|
Improve codegen for initializer lists to use memset more aggressively
when an initializer is variable (I handled the constant case in a previous
patch). This has three pieces:
1. Enhance AggValueSlot to have a 'isZeroed' bit to tell CGExprAgg that
the memory being stored into has previously been memset to zero.
2. Teach CGExprAgg to not emit stores of zero to isZeroed memory.
3. Teach CodeGenFunction::EmitAggExpr to scan initializers to determine
whether they are profitable to emit a memset + inividual stores vs
stores for everything.
The heuristic used is that a global has to be more than 16 bytes and
has to be 3/4 zero to be candidate for this xform. The two testcases
are illustrative of the scenarios this catches. We now codegen test9 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 400, i32 4, i1 false)
%.array = getelementptr inbounds [100 x i32]* %Arr, i32 0, i32 0
%tmp = load i32* %X.addr, align 4
store i32 %tmp, i32* %.array
and test10 into:
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 392, i32 8, i1 false)
%tmp = getelementptr inbounds %struct.b* %S, i32 0, i32 0
%tmp1 = getelementptr inbounds %struct.a* %tmp, i32 0, i32 0
%tmp2 = load i32* %X.addr, align 4
store i32 %tmp2, i32* %tmp1, align 4
%tmp5 = getelementptr inbounds %struct.b* %S, i32 0, i32 3
%tmp10 = getelementptr inbounds %struct.a* %tmp5, i32 0, i32 4
%tmp11 = load i32* %X.addr, align 4
store i32 %tmp11, i32* %tmp10, align 4
Previously we produced 99 stores of zero for test9 and also tons for test10.
This xforms should substantially speed up -O0 builds when it kicks in as well
as reducing code size and optimizer heartburn on insane cases. This resolves
PR279.
llvm-svn: 120692
2010-12-02 15:07:26 +08:00
|
|
|
|
|
|
|
// Tell the AggExprEmitter that the slot is known zero.
|
|
|
|
Slot.setZeroed();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2009-05-27 02:57:45 +08:00
|
|
|
/// EmitAggExpr - Emit the computation of the specified expression of
/// aggregate type.  The result is computed into the given slot.  If the slot
/// is ignored (it carries no address), the value of the aggregate expression
/// is not needed; otherwise the slot must have an address.
///
/// \param E - the aggregate-typed expression to emit; must be non-null.
/// \param Slot - describes the destination memory and its properties
///   (volatility, lifetime, aliasing); may be ignored, but if it has any
///   bits set it must also have an address.
/// \param IgnoreResult - true if the caller does not need the result value;
///   forwarded to the AggExprEmitter.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible: if the initializer is mostly zeros, this
  // may memset the slot up front and mark it as zeroed so the emitter can
  // skip the individual zero stores.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}
|
2008-09-10 04:49:46 +08:00
|
|
|
|
2010-02-06 03:38:31 +08:00
|
|
|
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
|
|
|
|
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
|
2010-02-09 10:48:28 +08:00
|
|
|
llvm::Value *Temp = CreateMemTemp(E->getType());
|
2010-08-21 11:15:20 +08:00
|
|
|
LValue LV = MakeAddrLValue(Temp, E->getType());
|
2011-08-26 04:40:09 +08:00
|
|
|
EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
|
2011-08-26 15:31:35 +08:00
|
|
|
AggValueSlot::DoesNotNeedGCBarriers,
|
|
|
|
AggValueSlot::IsNotAliased));
|
2010-08-21 11:15:20 +08:00
|
|
|
return LV;
|
2010-02-06 03:38:31 +08:00
|
|
|
}
|
|
|
|
|
2008-09-10 04:49:46 +08:00
|
|
|
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
|
2009-05-24 06:29:41 +08:00
|
|
|
llvm::Value *SrcPtr, QualType Ty,
|
2011-12-06 06:23:28 +08:00
|
|
|
bool isVolatile, unsigned Alignment) {
|
2008-09-10 04:49:46 +08:00
|
|
|
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2010-05-03 09:20:20 +08:00
|
|
|
if (getContext().getLangOptions().CPlusPlus) {
|
|
|
|
if (const RecordType *RT = Ty->getAs<RecordType>()) {
|
2010-05-20 23:39:01 +08:00
|
|
|
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
|
|
|
|
assert((Record->hasTrivialCopyConstructor() ||
|
2011-09-07 00:26:56 +08:00
|
|
|
Record->hasTrivialCopyAssignment() ||
|
|
|
|
Record->hasTrivialMoveConstructor() ||
|
|
|
|
Record->hasTrivialMoveAssignment()) &&
|
2010-05-20 23:39:01 +08:00
|
|
|
"Trying to aggregate-copy a type without a trivial copy "
|
|
|
|
"constructor or assignment operator");
|
2010-05-20 23:48:29 +08:00
|
|
|
// Ignore empty classes in C++.
|
2010-05-20 23:39:01 +08:00
|
|
|
if (Record->isEmpty())
|
2010-05-03 09:20:20 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-03-01 02:31:01 +08:00
|
|
|
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
|
2009-03-01 02:18:58 +08:00
|
|
|
// C99 6.5.16.1p3, which states "If the value being stored in an object is
|
|
|
|
// read from another object that overlaps in anyway the storage of the first
|
|
|
|
// object, then the overlap shall be exact and the two objects shall have
|
|
|
|
// qualified or unqualified versions of a compatible type."
|
|
|
|
//
|
2009-03-01 02:31:01 +08:00
|
|
|
// memcpy is not defined if the source and destination pointers are exactly
|
2009-03-01 02:18:58 +08:00
|
|
|
// equal, but other compilers do this optimization, and almost every memcpy
|
|
|
|
// implementation handles this case safely. If there is a libc that does not
|
|
|
|
// safely handle this, we can add a target hook.
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2008-09-10 04:49:46 +08:00
|
|
|
// Get size and alignment info for this aggregate.
|
2011-04-25 01:37:26 +08:00
|
|
|
std::pair<CharUnits, CharUnits> TypeInfo =
|
|
|
|
getContext().getTypeInfoInChars(Ty);
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2011-12-06 06:23:28 +08:00
|
|
|
if (!Alignment)
|
|
|
|
Alignment = TypeInfo.second.getQuantity();
|
|
|
|
|
2008-09-10 04:49:46 +08:00
|
|
|
// FIXME: Handle variable sized types.
|
2009-09-09 23:08:12 +08:00
|
|
|
|
2009-05-23 12:13:59 +08:00
|
|
|
// FIXME: If we have a volatile struct, the optimizer can remove what might
|
|
|
|
// appear to be `extra' memory ops:
|
|
|
|
//
|
|
|
|
// volatile struct { int i; } a, b;
|
|
|
|
//
|
|
|
|
// int main() {
|
|
|
|
// a = b;
|
|
|
|
// a = b;
|
|
|
|
// }
|
|
|
|
//
|
2010-04-04 11:10:52 +08:00
|
|
|
// we need to use a different call here. We use isVolatile to indicate when
|
2009-05-27 06:03:21 +08:00
|
|
|
// either the source or the destination is volatile.
|
2010-04-04 11:10:52 +08:00
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
|
|
|
|
llvm::Type *DBP =
|
2011-02-08 16:22:06 +08:00
|
|
|
llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
|
2011-09-28 05:06:10 +08:00
|
|
|
DestPtr = Builder.CreateBitCast(DestPtr, DBP);
|
2010-04-04 11:10:52 +08:00
|
|
|
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
|
|
|
|
llvm::Type *SBP =
|
2011-02-08 16:22:06 +08:00
|
|
|
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
|
2011-09-28 05:06:10 +08:00
|
|
|
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
|
2010-04-04 11:10:52 +08:00
|
|
|
|
2011-06-16 07:02:42 +08:00
|
|
|
// Don't do any of the memmove_collectable tests if GC isn't set.
|
2011-09-14 01:21:33 +08:00
|
|
|
if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
|
2011-06-16 07:02:42 +08:00
|
|
|
// fall through
|
|
|
|
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
|
2010-06-16 06:44:06 +08:00
|
|
|
RecordDecl *Record = RecordTy->getDecl();
|
|
|
|
if (Record->hasObjectMember()) {
|
2011-04-25 01:37:26 +08:00
|
|
|
CharUnits size = TypeInfo.first;
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
|
2011-04-25 01:37:26 +08:00
|
|
|
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
|
2010-06-16 06:44:06 +08:00
|
|
|
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
|
|
|
|
SizeVal);
|
|
|
|
return;
|
|
|
|
}
|
2011-06-16 07:02:42 +08:00
|
|
|
} else if (Ty->isArrayType()) {
|
2010-06-16 06:44:06 +08:00
|
|
|
QualType BaseType = getContext().getBaseElementType(Ty);
|
|
|
|
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
|
|
|
|
if (RecordTy->getDecl()->hasObjectMember()) {
|
2011-04-25 01:37:26 +08:00
|
|
|
CharUnits size = TypeInfo.first;
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
|
2011-04-25 01:37:26 +08:00
|
|
|
llvm::Value *SizeVal =
|
|
|
|
llvm::ConstantInt::get(SizeTy, size.getQuantity());
|
2010-06-16 06:44:06 +08:00
|
|
|
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
|
|
|
|
SizeVal);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-12-30 08:13:21 +08:00
|
|
|
Builder.CreateMemCpy(DestPtr, SrcPtr,
|
2011-04-25 01:37:26 +08:00
|
|
|
llvm::ConstantInt::get(IntPtrTy,
|
|
|
|
TypeInfo.first.getQuantity()),
|
2011-12-06 06:23:28 +08:00
|
|
|
Alignment, isVolatile);
|
2008-09-10 04:49:46 +08:00
|
|
|
}
|
2012-02-17 16:42:25 +08:00
|
|
|
|
2012-02-20 00:03:09 +08:00
|
|
|
void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
|
|
|
|
const Expr *init) {
|
2012-02-17 16:42:25 +08:00
|
|
|
const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
|
2012-02-20 00:03:09 +08:00
|
|
|
if (cleanups)
|
|
|
|
init = cleanups->getSubExpr();
|
2012-02-17 16:42:25 +08:00
|
|
|
|
|
|
|
if (isa<InitListExpr>(init) &&
|
|
|
|
cast<InitListExpr>(init)->initializesStdInitializerList()) {
|
|
|
|
// We initialized this std::initializer_list with an initializer list.
|
|
|
|
// A backing array was created. Push a cleanup for it.
|
2012-02-20 00:03:09 +08:00
|
|
|
EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
|
2012-02-19 20:28:02 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
|
|
|
|
llvm::Value *arrayStart,
|
|
|
|
const InitListExpr *init) {
|
|
|
|
// Check if there are any recursive cleanups to do, i.e. if we have
|
|
|
|
// std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
|
|
|
|
// then we need to destroy the inner array as well.
|
|
|
|
for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
|
|
|
|
const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
|
|
|
|
if (!subInit || !subInit->initializesStdInitializerList())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// This one needs to be destroyed. Get the address of the std::init_list.
|
|
|
|
llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
|
|
|
|
llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
|
|
|
|
"std.initlist");
|
|
|
|
CGF.EmitStdInitializerListCleanup(loc, subInit);
|
2012-02-17 16:42:25 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-02-19 20:28:02 +08:00
|
|
|
void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
|
2012-02-17 16:42:25 +08:00
|
|
|
const InitListExpr *init) {
|
|
|
|
ASTContext &ctx = getContext();
|
|
|
|
QualType element = GetStdInitializerListElementType(init->getType());
|
|
|
|
unsigned numInits = init->getNumInits();
|
|
|
|
llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
|
|
|
|
QualType array =ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
|
|
|
|
QualType arrayPtr = ctx.getPointerType(array);
|
|
|
|
llvm::Type *arrayPtrType = ConvertType(arrayPtr);
|
|
|
|
|
|
|
|
// lvalue is the location of a std::initializer_list, which as its first
|
|
|
|
// element has a pointer to the array we want to destroy.
|
2012-02-19 20:28:02 +08:00
|
|
|
llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
|
|
|
|
llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");
|
2012-02-17 16:42:25 +08:00
|
|
|
|
2012-02-19 20:28:02 +08:00
|
|
|
::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);
|
|
|
|
|
|
|
|
llvm::Value *arrayAddress =
|
|
|
|
Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
|
2012-02-17 16:42:25 +08:00
|
|
|
::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
|
|
|
|
}
|