Significantly simplify CGExprAgg's logic about ignored results:
if we want to ignore a result, the Dest will be null. Otherwise,
we must copy into it. This means we need to ensure a slot when
loading from a volatile l-value.

With all that in place, fix a bug with chained assignments into
__block variables of aggregate type where we were losing insight
into the actual source of the value during the second assignment.

llvm-svn: 159630
commit 4e8ca4fa14
parent fc7c677c8d
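The chained-assignment bug is easiest to see as a small standalone C program. The sketch below mirrors the new test added at the end of this commit; makeAgg is a stand-in for any function returning an aggregate, and the file itself is hypothetical, not part of the commit. It compiles with something like clang -fblocks -c chained.c:

/* Chained assignment into __block aggregates: both stores must see the
   value makeAgg() produced.  A __block variable can move to the heap
   when a block is copied, so every store goes through the variable's
   'forwarding' pointer; before this fix, the second assignment lost
   track of the temporary holding makeAgg()'s result. */
typedef struct { int v; } Agg;

Agg makeAgg(void);

void chained(void) {
  __block Agg a, b;
  a = b = makeAgg();
}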
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -109,15 +109,18 @@ void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
 /// can have any type. The result is returned as an RValue struct.
 /// If this is an aggregate expression, AggSlot indicates where the
 /// result should be returned.
-RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
-                                    bool IgnoreResult) {
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
+                                    AggValueSlot aggSlot,
+                                    bool ignoreResult) {
   if (!hasAggregateLLVMType(E->getType()))
-    return RValue::get(EmitScalarExpr(E, IgnoreResult));
+    return RValue::get(EmitScalarExpr(E, ignoreResult));
   else if (E->getType()->isAnyComplexType())
-    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
+    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
 
-  EmitAggExpr(E, AggSlot, IgnoreResult);
-  return AggSlot.asRValue();
+  if (!ignoreResult && aggSlot.isIgnored())
+    aggSlot = CreateAggTemp(E->getType(), "agg-temp");
+  EmitAggExpr(E, aggSlot);
+  return aggSlot.asRValue();
 }
 
 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -34,7 +34,6 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   CodeGenFunction &CGF;
   CGBuilderTy &Builder;
   AggValueSlot Dest;
-  bool IgnoreResult;
 
   /// We want to use 'dest' as the return slot except under two
   /// conditions:
@@ -56,12 +55,14 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     if (!Dest.isIgnored()) return Dest;
     return CGF.CreateAggTemp(T, "agg.tmp.ensured");
   }
+  void EnsureDest(QualType T) {
+    if (!Dest.isIgnored()) return;
+    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
+  }
 
 public:
-  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
-                 bool ignore)
-    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
-      IgnoreResult(ignore) {
+  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
+    : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
   }
 
   //===--------------------------------------------------------------------===//
@@ -74,9 +75,11 @@ public:
   void EmitAggLoadOfLValue(const Expr *E);
 
   /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
-  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
-                         unsigned Alignment = 0);
+  void EmitFinalDestCopy(QualType type, const LValue &src);
+  void EmitFinalDestCopy(QualType type, RValue src,
+                         CharUnits srcAlignment = CharUnits::Zero());
+  void EmitCopy(QualType type, const AggValueSlot &dest,
+                const AggValueSlot &src);
 
   void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
 
@@ -119,7 +122,7 @@ public:
     if (E->getDecl()->getType()->isReferenceType()) {
       if (CodeGenFunction::ConstantEmission result
             = CGF.tryEmitAsConstant(E)) {
-        EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
+        EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
         return;
       }
     }
@@ -171,7 +174,7 @@ public:
   void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
     if (E->isGLValue()) {
       LValue LV = CGF.EmitPseudoObjectLValue(E);
-      return EmitFinalDestCopy(E, LV);
+      return EmitFinalDestCopy(E->getType(), LV);
     }
 
     CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
@@ -198,7 +201,7 @@ public:
 /// then loads the result into DestPtr.
 void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
   LValue LV = CGF.EmitLValue(E);
-  EmitFinalDestCopy(E, LV);
+  EmitFinalDestCopy(E->getType(), LV);
 }
 
 /// \brief True if the given aggregate type requires special GC API calls.
@@ -228,7 +231,7 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
 /// If nothing interferes, this will cause the result to be emitted
 /// directly into the return value slot. Otherwise, a final move
 /// will be performed.
-void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
+void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
   if (shouldUseDestForReturnSlot()) {
     // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
     // The possibility of undef rvalues complicates that a lot,
@@ -236,61 +239,58 @@ void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
     return;
   }
 
-  // Otherwise, do a final copy,
-  assert(Dest.getAddr() != Src.getAggregateAddr());
-  std::pair<CharUnits, CharUnits> TypeInfo =
+  // Otherwise, copy from there to the destination.
+  assert(Dest.getAddr() != src.getAggregateAddr());
+  std::pair<CharUnits, CharUnits> typeInfo =
     CGF.getContext().getTypeInfoInChars(E->getType());
-  CharUnits Alignment = std::min(TypeInfo.second, Dest.getAlignment());
-  EmitFinalDestCopy(E, Src, /*Ignore*/ true, Alignment.getQuantity());
+  EmitFinalDestCopy(E->getType(), src, typeInfo.second);
 }
 
 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
-                                       unsigned Alignment) {
-  assert(Src.isAggregate() && "value must be aggregate value!");
+void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
+                                       CharUnits srcAlign) {
+  assert(src.isAggregate() && "value must be aggregate value!");
+  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
+  EmitFinalDestCopy(type, srcLV);
+}
 
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
   // If Dest is ignored, then we're evaluating an aggregate expression
-  // in a context (like an expression statement) that doesn't care
-  // about the result.  C says that an lvalue-to-rvalue conversion is
-  // performed in these cases; C++ says that it is not.  In either
-  // case, we don't actually need to do anything unless the value is
-  // volatile.
-  if (Dest.isIgnored()) {
-    if (!Src.isVolatileQualified() ||
-        CGF.CGM.getLangOpts().CPlusPlus ||
-        (IgnoreResult && Ignore))
-      return;
-
-    // If the source is volatile, we must read from it; to do that, we need
-    // some place to put it.
-    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
-  }
+  // in a context that doesn't care about the result.  Note that loads
+  // from volatile l-values force the existence of a non-ignored
+  // destination.
+  if (Dest.isIgnored())
+    return;
 
-  if (Dest.requiresGCollection()) {
-    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
-    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
-    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
+  AggValueSlot srcAgg =
+    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
+                            needsGC(type), AggValueSlot::IsAliased);
+  EmitCopy(type, Dest, srcAgg);
+}
+
+/// Perform a copy from the source into the destination.
+///
+/// \param type - the type of the aggregate being copied; qualifiers are
+///   ignored
+void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
+                              const AggValueSlot &src) {
+  if (dest.requiresGCollection()) {
+    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
+    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
     CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
-                                                      Dest.getAddr(),
-                                                      Src.getAggregateAddr(),
-                                                      SizeVal);
+                                                      dest.getAddr(),
+                                                      src.getAddr(),
+                                                      size);
     return;
   }
 
   // If the result of the assignment is used, copy the LHS there also.
-  // FIXME: Pass VolatileDest as well. I think we also need to merge volatile
-  // from the source as well, as we can't eliminate it if either operand
-  // is volatile, unless copy has volatile for both source and destination..
-  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
-                        Dest.isVolatile()|Src.isVolatileQualified(),
-                        Alignment);
-}
-
-/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
-  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
-
-  CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
-  EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
+  // It's volatile if either side is.  Use the minimum alignment of
+  // the two sides.
+  CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
+                        dest.isVolatile() || src.isVolatile(),
+                        std::min(dest.getAlignment(), src.getAlignment()));
 }
 
 static QualType GetStdInitializerListElementType(QualType T) {
@@ -526,7 +526,7 @@ void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
 }
 
 void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
-  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
+  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
 }
 
 void
@@ -582,7 +582,15 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
            "should have been unpacked before we got here");
   }
 
-  case CK_LValueToRValue: // hope for downstream optimization
+  case CK_LValueToRValue:
+    // If we're loading from a volatile type, force the destination
+    // into existence.
+    if (E->getSubExpr()->getType().isVolatileQualified()) {
+      EnsureDest(E->getType());
+      return Visit(E->getSubExpr());
+    }
+    // fallthrough
+
   case CK_NoOp:
   case CK_AtomicToNonAtomic:
   case CK_NonAtomicToAtomic:
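The CK_LValueToRValue case above is where the destination slot gets forced into existence. The comment deleted from EmitFinalDestCopy spelled out the language rule this relies on: C performs the lvalue-to-rvalue conversion in an expression statement, so a load from a volatile aggregate must happen even though the value is discarded, while C++ says it is not performed. A minimal C sketch of the affected case (hypothetical, not part of the commit):

typedef struct { int x, y; } Pair;

void read_volatile_pair(volatile Pair *p) {
  /* Expression statement: the value is unused, but C still requires the
     volatile access, so codegen ensures a destination slot and performs
     the aggregate load. */
  *p;
}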
@@ -676,7 +684,73 @@ void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
 void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                     const BinaryOperator *E) {
   LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
-  EmitFinalDestCopy(E, LV);
+  EmitFinalDestCopy(E->getType(), LV);
+}
+
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
+static bool isBlockVarRef(const Expr *E) {
+  // Make sure we look through parens.
+  E = E->IgnoreParens();
+
+  // Check for a direct reference to a __block variable.
+  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
+    return (var && var->hasAttr<BlocksAttr>());
+  }
+
+  // More complicated stuff.
+
+  // Binary operators.
+  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
+    // For an assignment or pointer-to-member operation, just care
+    // about the LHS.
+    if (op->isAssignmentOp() || op->isPtrMemOp())
+      return isBlockVarRef(op->getLHS());
+
+    // For a comma, just care about the RHS.
+    if (op->getOpcode() == BO_Comma)
+      return isBlockVarRef(op->getRHS());
+
+    // FIXME: pointer arithmetic?
+    return false;
+
+  // Check both sides of a conditional operator.
+  } else if (const AbstractConditionalOperator *op
+               = dyn_cast<AbstractConditionalOperator>(E)) {
+    return isBlockVarRef(op->getTrueExpr())
+        || isBlockVarRef(op->getFalseExpr());
+
+  // OVEs are required to support BinaryConditionalOperators.
+  } else if (const OpaqueValueExpr *op
+               = dyn_cast<OpaqueValueExpr>(E)) {
+    if (const Expr *src = op->getSourceExpr())
+      return isBlockVarRef(src);
+
+  // Casts are necessary to get things like (*(int*)&var) = foo().
+  // We don't really care about the kind of cast here, except
+  // we don't want to look through l2r casts, because it's okay
+  // to get the *value* in a __block variable.
+  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
+    if (cast->getCastKind() == CK_LValueToRValue)
+      return false;
+    return isBlockVarRef(cast->getSubExpr());
+
+  // Handle unary operators.  Again, just aggressively look through
+  // it, ignoring the operation.
+  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
+    return isBlockVarRef(uop->getSubExpr());
+
+  // Look into the base of a field access.
+  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
+    return isBlockVarRef(mem->getBase());
+
+  // Look into the base of a subscript.
+  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
+    return isBlockVarRef(sub->getBase());
+  }
+
+  return false;
 }
 
 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
@@ -686,20 +760,26 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
                                                  E->getRHS()->getType())
          && "Invalid assignment");
 
-  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
-    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
-      if (VD->hasAttr<BlocksAttr>() &&
-          E->getRHS()->HasSideEffects(CGF.getContext())) {
-        // When __block variable on LHS, the RHS must be evaluated first
-        // as it may change the 'forwarding' field via call to Block_copy.
-        LValue RHS = CGF.EmitLValue(E->getRHS());
-        LValue LHS = CGF.EmitLValue(E->getLHS());
-        Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
-                                       needsGC(E->getLHS()->getType()),
-                                       AggValueSlot::IsAliased);
-        EmitFinalDestCopy(E, RHS, true);
-        return;
-      }
+  // If the LHS might be a __block variable, and the RHS can
+  // potentially cause a block copy, we need to evaluate the RHS first
+  // so that the assignment goes the right place.
+  // This is pretty semantically fragile.
+  if (isBlockVarRef(E->getLHS()) &&
+      E->getRHS()->HasSideEffects(CGF.getContext())) {
+    // Ensure that we have a destination, and evaluate the RHS into that.
+    EnsureDest(E->getRHS()->getType());
+    Visit(E->getRHS());
+
+    // Now emit the LHS and copy into it.
+    LValue LHS = CGF.EmitLValue(E->getLHS());
+
+    EmitCopy(E->getLHS()->getType(),
+             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
+                                     AggValueSlot::IsAliased),
+             Dest);
+    return;
+  }
 
   LValue LHS = CGF.EmitLValue(E->getLHS());
@@ -708,8 +788,10 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
     AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                             needsGC(E->getLHS()->getType()),
                             AggValueSlot::IsAliased);
-  CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
-  EmitFinalDestCopy(E, LHS, true);
+  CGF.EmitAggExpr(E->getRHS(), LHSSlot);
+
+  // Copy into the destination if the assignment isn't ignored.
+  EmitFinalDestCopy(E->getType(), LHS);
 }
 
 void AggExprEmitter::
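The reordering above matters because evaluating the RHS can itself copy a block that captures the LHS variable, which moves the __block variable to the heap and updates its forwarding pointer. A hedged illustration of that interaction (hypothetical code, not part of the commit; needs -fblocks and a blocks runtime providing Block.h):

#include <Block.h>

typedef struct { int v; } Agg;

static void (^saved)(void);

static Agg update(void (^b)(void)) {
  /* Copying the block migrates captured __block variables to the heap. */
  saved = Block_copy(b);
  Agg r = { 42 };
  return r;
}

void test(void) {
  __block Agg a = { 0 };
  /* The RHS runs first; only then is a's address computed through the
     forwarding pointer, so the store lands in the heap copy of 'a'. */
  a = update(^{ (void)a.v; });
}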
@@ -762,14 +844,14 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
     return;
   }
 
-  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
+  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
 }
 
 void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
   // Ensure that we have a slot, but if we already do, remember
   // whether it was externally destructed.
   bool wasExternallyDestructed = Dest.isExternallyDestructed();
-  Dest = EnsureSlot(E->getType());
+  EnsureDest(E->getType());
 
   // We're going to push a destructor if there isn't already one.
   Dest.setExternallyDestructed();
@@ -904,7 +986,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
     llvm::GlobalVariable* GV =
     new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                              llvm::GlobalValue::InternalLinkage, C, "");
-    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
+    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
     return;
   }
 #endif
@@ -1164,8 +1246,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
 /// type. The result is computed into DestPtr. Note that if DestPtr is null,
 /// the value of the aggregate expression is not needed. If VolatileDest is
 /// true, DestPtr cannot be 0.
-void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
-                                  bool IgnoreResult) {
+void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
   assert(E && hasAggregateLLVMType(E->getType()) &&
          "Invalid aggregate expression to emit");
   assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
@@ -1174,7 +1255,7 @@ void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
   // Optimize the slot if possible.
   CheckAggExprForMemSetUse(Slot, E, *this);
 
-  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
+  AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
 }
 
 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
@@ -1189,7 +1270,8 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
 
 void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                         llvm::Value *SrcPtr, QualType Ty,
-                                        bool isVolatile, unsigned Alignment) {
+                                        bool isVolatile,
+                                        CharUnits alignment) {
   assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
 
   if (getContext().getLangOpts().CPlusPlus) {
@@ -1222,8 +1304,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   std::pair<CharUnits, CharUnits> TypeInfo =
     getContext().getTypeInfoInChars(Ty);
 
-  if (!Alignment)
-    Alignment = TypeInfo.second.getQuantity();
+  if (alignment.isZero())
+    alignment = TypeInfo.second;
 
   // FIXME: Handle variable sized types.
 
@@ -1281,7 +1363,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   Builder.CreateMemCpy(DestPtr, SrcPtr,
                        llvm::ConstantInt::get(IntPtrTy,
                                               TypeInfo.first.getQuantity()),
-                       Alignment, isVolatile);
+                       alignment.getQuantity(), isVolatile);
 }
 
 void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -389,7 +389,8 @@ public:
     return AV;
   }
 
-  static AggValueSlot forLValue(LValue LV, IsDestructed_t isDestructed,
+  static AggValueSlot forLValue(const LValue &LV,
+                                IsDestructed_t isDestructed,
                                 NeedsGCBarriers_t needsGC,
                                 IsAliased_t isAliased,
                                 IsZeroed_t isZeroed = IsNotZeroed) {
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -1566,6 +1566,7 @@ public:
     return LValue::MakeAddr(V, T, Alignment, getContext(),
                             CGM.getTBAAInfo(T));
   }
+
   LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
     CharUnits Alignment;
     if (!T->isIncompleteType())
@@ -1622,8 +1623,8 @@ public:
   ///
   /// \param IgnoreResult - True if the resulting value isn't used.
   RValue EmitAnyExpr(const Expr *E,
-                     AggValueSlot AggSlot = AggValueSlot::ignored(),
-                     bool IgnoreResult = false);
+                     AggValueSlot aggSlot = AggValueSlot::ignored(),
+                     bool ignoreResult = false);
 
   // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
   // or the value of the expression, depending on how va_list is defined.
@@ -1649,7 +1650,7 @@ public:
   /// volatile.
   void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                          QualType EltTy, bool isVolatile=false,
-                         unsigned Alignment = 0);
+                         CharUnits Alignment = CharUnits::Zero());
 
   /// StartBlock - Start new block named N. If insert block is a dummy block
   /// then reuse it.
@@ -2363,7 +2364,7 @@ public:
   /// EmitAggExpr - Emit the computation of the specified expression
   /// of aggregate type. The result is computed into the given slot,
   /// which may be null to indicate that the value is not needed.
-  void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
+  void EmitAggExpr(const Expr *E, AggValueSlot AS);
 
   /// EmitAggExprToLValue - Emit the computation of the specified expression of
   /// aggregate type into a temporary LValue.
--- a/clang/test/CodeGen/block-byref-aggr.c
+++ b/clang/test/CodeGen/block-byref-aggr.c
@@ -1,17 +1,66 @@
 // RUN: %clang_cc1 %s -emit-llvm -o - -fblocks -triple x86_64-apple-darwin10 | FileCheck %s
-// rdar://9309454
 
-typedef struct { int v; } RetType;
-
-RetType func();
-
-int main () {
-  __attribute__((__blocks__(byref))) RetType a = {100};
+// CHECK: [[AGG:%.*]] = type { i32 }
+typedef struct { int v; } Agg;
+Agg makeAgg(void);
+
+// When assigning into a __block variable, ensure that we compute that
+// address *after* evaluating the RHS when the RHS has the capacity to
+// cause a block copy. rdar://9309454
+void test0() {
+  __block Agg a = {100};
 
-  a = func();
+  a = makeAgg();
 }
-// CHECK: [[C1:%.*]] = call i32 (...)* @func()
-// CHECK-NEXT: [[CO:%.*]] = getelementptr
-// CHECK-NEXT: store i32 [[C1]], i32* [[CO]]
-// CHECK-NEXT: [[FORWARDING:%.*]] = getelementptr inbounds [[BR:%.*]]* [[A:%.*]], i32 0, i32 1
-// CHECK-NEXT: [[O:%.*]] = load [[BR]]** [[FORWARDING]]
+// CHECK: define void @test0()
+// CHECK:      [[A:%.*]] = alloca [[BYREF:%.*]], align 8
+// CHECK-NEXT: [[TEMP:%.*]] = alloca [[AGG]], align 4
+// CHECK:      [[RESULT:%.*]] = call i32 @makeAgg()
+// CHECK-NEXT: [[T0:%.*]] = getelementptr [[AGG]]* [[TEMP]], i32 0, i32 0
+// CHECK-NEXT: store i32 [[RESULT]], i32* [[T0]]
+//   Check that we properly assign into the forwarding pointer.
+// CHECK-NEXT: [[A_FORWARDING:%.*]] = getelementptr inbounds [[BYREF]]* [[A]], i32 0, i32 1
+// CHECK-NEXT: [[T0:%.*]] = load [[BYREF]]** [[A_FORWARDING]]
+// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[BYREF]]* [[T0]], i32 0, i32 4
+// CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
+// CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T2]], i8* [[T3]], i64 4, i32 4, i1 false)
+//   Verify that there's nothing else significant in the function.
+// CHECK-NEXT: [[T0:%.*]] = bitcast [[BYREF]]* [[A]] to i8*
+// CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8)
+// CHECK-NEXT: ret void
+
+// When chaining assignments into __block variables, make sure we
+// propagate the actual value into the outer variable.
+// rdar://11757470
+void test1() {
+  __block Agg a, b;
+  a = b = makeAgg();
+}
+// CHECK: define void @test1()
+// CHECK:      [[A:%.*]] = alloca [[A_BYREF:%.*]], align 8
+// CHECK-NEXT: [[B:%.*]] = alloca [[B_BYREF:%.*]], align 8
+// CHECK-NEXT: [[TEMP:%.*]] = alloca [[AGG]], align 4
+// CHECK:      [[RESULT:%.*]] = call i32 @makeAgg()
+// CHECK-NEXT: [[T0:%.*]] = getelementptr [[AGG]]* [[TEMP]], i32 0, i32 0
+// CHECK-NEXT: store i32 [[RESULT]], i32* [[T0]]
+//   Check that we properly assign into the forwarding pointer, first for b:
+// CHECK-NEXT: [[B_FORWARDING:%.*]] = getelementptr inbounds [[B_BYREF]]* [[B]], i32 0, i32 1
+// CHECK-NEXT: [[T0:%.*]] = load [[B_BYREF]]** [[B_FORWARDING]]
+// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[B_BYREF]]* [[T0]], i32 0, i32 4
+// CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
+// CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T2]], i8* [[T3]], i64 4, i32 4, i1 false)
+//   Then for 'a':
+// CHECK-NEXT: [[A_FORWARDING:%.*]] = getelementptr inbounds [[A_BYREF]]* [[A]], i32 0, i32 1
+// CHECK-NEXT: [[T0:%.*]] = load [[A_BYREF]]** [[A_FORWARDING]]
+// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[A_BYREF]]* [[T0]], i32 0, i32 4
+// CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
+// CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T2]], i8* [[T3]], i64 4, i32 4, i1 false)
+//   Verify that there's nothing else significant in the function.
+// CHECK-NEXT: [[T0:%.*]] = bitcast [[B_BYREF]]* [[B]] to i8*
+// CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8)
+// CHECK-NEXT: [[T0:%.*]] = bitcast [[A_BYREF]]* [[A]] to i8*
+// CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8)
+// CHECK-NEXT: ret void