Clean carriage returns from lib/ and include/. NFC.

Summary:
Clean carriage returns from lib/ and include/. NFC.
(I have to make this change locally in order for `git diff` to show sane output after I edit a file, so I might as well ask for it to be committed. I don't have commit privs myself.)
(Without this patch, `git rebase`ing any change involving SemaDeclCXX.cpp is a real nightmare. :( So while I have no right to ask for this to be committed, geez would it make my workflow easier if it were.)

Here's the command I used to reformat things. (Requires bash and OSX/FreeBSD sed.)

    git grep -l $'\r' lib include | xargs sed -i -e $'s/\r//'
    find lib include -name '*-e' -delete
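
(Aside, not part of the committed change: with GNU sed, `-i` edits files in place without creating the stray `*-e` backup files, so the `find ... -delete` cleanup isn't needed. A rough equivalent, assuming bash and GNU sed; a sketch only, not the command that was actually run:)

    # same cleanup, sketched for GNU sed (no backup suffix, so no '*-e' files to delete)
    git grep -l $'\r' lib include | xargs sed -i $'s/\r//'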

Reviewers: malcolm.parsons

Reviewed By: malcolm.parsons

Subscribers: emaste, krytarowski, cfe-commits

Differential Revision: https://reviews.llvm.org/D45591

Patch by Arthur O'Dwyer.

llvm-svn: 330112
Malcolm Parsons 2018-04-16 08:31:08 +00:00
parent 6ea89b4041
commit fab3680990
8 changed files with 445 additions and 445 deletions


@@ -2213,14 +2213,14 @@ void ASTDumper::VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
}
void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) {
VisitExpr(Node);
OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
<< " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
if (!Node->canOverflow())
OS << " cannot overflow";
}
void ASTDumper::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *Node) {
VisitExpr(Node);
switch(Node->getKind()) {


@@ -3328,12 +3328,12 @@ static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
}
CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType);
return Obj && modifySubobject(Info, E, Obj, LVal.Designator, Val);
}
namespace {
struct CompoundAssignSubobjectHandler {
EvalInfo &Info;
const Expr *E;
QualType PromotedLHSType;
BinaryOperatorKind Opcode;
@@ -3449,13 +3449,13 @@ static bool handleCompoundAssignment(
return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
}
namespace {
struct IncDecSubobjectHandler {
EvalInfo &Info;
const UnaryOperator *E;
AccessKinds AccessKind;
APValue *Old;
typedef bool result_type;
bool checkConst(QualType QT) {
@@ -3521,20 +3521,20 @@ struct IncDecSubobjectHandler {
}
bool WasNegative = Value.isNegative();
if (AccessKind == AK_Increment) {
++Value;
if (!WasNegative && Value.isNegative() && E->canOverflow()) {
APSInt ActualValue(Value, /*IsUnsigned*/true);
return HandleOverflow(Info, E, ActualValue, SubobjType);
}
} else {
--Value;
if (WasNegative && !Value.isNegative() && E->canOverflow()) {
unsigned BitWidth = Value.getBitWidth();
APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false);
ActualValue.setBit(BitWidth);
return HandleOverflow(Info, E, ActualValue, SubobjType);
}
}
@@ -3589,13 +3589,13 @@ static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
Info.FFDiag(E);
return false;
}
AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement;
CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType);
IncDecSubobjectHandler Handler = {Info, cast<UnaryOperator>(E), AK, Old};
return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
}
/// Build an lvalue for the object argument of a member function call.
static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
LValue &This) {
@@ -9006,13 +9006,13 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
return Visit(E->getSubExpr());
case UO_Minus: {
if (!Visit(E->getSubExpr()))
return false;
if (!Result.isInt()) return Error(E);
const APSInt &Value = Result.getInt();
if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() &&
!HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
E->getType()))
return false;
return Success(-Value, E);
}
case UO_Not: {


@@ -162,13 +162,13 @@ static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
// we can elide the overflow check.
if (!Op.mayHaveIntegerOverflow())
return true;
// If a unary op has a widened operand, the op cannot overflow.
if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
return !UO->canOverflow();
// We usually don't need overflow checks for binops with widened operands.
// Multiplication with promoted unsigned operands is a special case.
const auto *BO = cast<BinaryOperator>(Op.E);
auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
if (!OptionalLHSTy)
@@ -1871,13 +1871,13 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
return Builder.CreateAdd(InVal, Amount, Name);
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(InVal, Amount, Name);
// Fall through.
case LangOptions::SOB_Trapping:
if (!E->canOverflow())
return Builder.CreateNSWAdd(InVal, Amount, Name);
return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
}
llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
@@ -1953,15 +1953,15 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
value = Builder.getTrue();
// Most common case by far: integer increment.
} else if (type->isIntegerType()) {
// Note that signed integer inc/dec with width less than int can't
// overflow because of promotion rules; we're just eliding a few steps here.
if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
} else if (E->canOverflow() && type->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
value =
EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
} else {
llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");


@@ -11453,18 +11453,18 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
Expr *Comparison
= new (S.Context) BinaryOperator(IterationVarRefRVal.build(S, Loc),
IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
BO_NE, S.Context.BoolTy,
VK_RValue, OK_Ordinary, Loc, FPOptions());
// Create the pre-increment of the iteration variable. We can determine
// whether the increment will overflow based on the value of the array
// bound.
Expr *Increment = new (S.Context)
UnaryOperator(IterationVarRef.build(S, Loc), UO_PreInc, SizeType,
VK_LValue, OK_Ordinary, Loc, Upper.isMaxValue());
// Construct the loop that copies all elements of this array.
return S.ActOnForStmt(
Loc, Loc, InitStmt,
S.ActOnCondition(nullptr, Loc, Comparison, Sema::ConditionKind::Boolean),
S.MakeFullDiscardedValueExpr(Increment), Loc, Copy.get());


@@ -535,18 +535,18 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
addSemanticExpr(result.get());
if (UnaryOperator::isPrefix(opcode) && !captureSetValueAsResult() &&
!result.get()->getType()->isVoidType() &&
(result.get()->isTypeDependent() || CanCaptureValue(result.get())))
setResultToLastSemantic();
UnaryOperator *syntactic = new (S.Context) UnaryOperator(
syntacticOp, opcode, resultType, VK_LValue, OK_Ordinary, opcLoc,
!resultType->isDependentType()
? S.Context.getTypeSize(resultType) >=
S.Context.getTypeSize(S.Context.IntTy)
: false);
return complete(syntactic);
}
//===----------------------------------------------------------------------===//
// Objective-C @property and implicit property references
@@ -1645,15 +1645,15 @@ static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) {
/// capable of rebuilding a tree without stripping implicit
/// operations.
Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
Expr *syntax = E->getSyntacticForm();
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
return new (Context) UnaryOperator(
op, uop->getOpcode(), uop->getType(), uop->getValueKind(),
uop->getObjectKind(), uop->getOperatorLoc(), uop->canOverflow());
} else if (CompoundAssignOperator *cop
= dyn_cast<CompoundAssignOperator>(syntax)) {
Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
return new (Context) CompoundAssignOperator(lhs, rhs, cop->getOpcode(),
cop->getType(),


@@ -983,9 +983,9 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
auto CheckValidDeclSpecifiers = [this, &D] {
// C++ [temp.param]
// p1
// template-parameter:
// ...
// parameter-declaration
// p2
// ... A storage class shall not be specified in a template-parameter
// declaration.


@@ -506,13 +506,13 @@ void ASTStmtWriter::VisitParenListExpr(ParenListExpr *E) {
void ASTStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
Record.AddStmt(E->getSubExpr());
Record.push_back(E->getOpcode()); // FIXME: stable encoding
Record.AddSourceLocation(E->getOperatorLoc());
Record.push_back(E->canOverflow());
Code = serialization::EXPR_UNARY_OPERATOR;
}
void ASTStmtWriter::VisitOffsetOfExpr(OffsetOfExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumComponents());


@@ -1,330 +1,330 @@
//=======- PaddingChecker.cpp ------------------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a checker that checks for padding that could be
// removed by re-ordering members.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <numeric>
using namespace clang;
using namespace ento;
namespace {
class PaddingChecker : public Checker<check::ASTDecl<TranslationUnitDecl>> {
private:
mutable std::unique_ptr<BugType> PaddingBug;
mutable int64_t AllowedPad;
mutable BugReporter *BR;
public:
void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
BugReporter &BRArg) const {
BR = &BRArg;
AllowedPad =
MGR.getAnalyzerOptions().getOptionAsInteger("AllowedPad", 24, this);
assert(AllowedPad >= 0 && "AllowedPad option should be non-negative");
// The calls to checkAST* from AnalysisConsumer don't
// visit template instantiations or lambda classes. We
// want to visit those, so we make our own RecursiveASTVisitor.
struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
const PaddingChecker *Checker;
bool shouldVisitTemplateInstantiations() const { return true; }
bool shouldVisitImplicitCode() const { return true; }
explicit LocalVisitor(const PaddingChecker *Checker) : Checker(Checker) {}
bool VisitRecordDecl(const RecordDecl *RD) {
Checker->visitRecord(RD);
return true;
}
bool VisitVarDecl(const VarDecl *VD) {
Checker->visitVariable(VD);
return true;
}
// TODO: Visit array new and mallocs for arrays.
};
LocalVisitor visitor(this);
visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
}
/// \brief Look for records of overly padded types. If padding *
/// PadMultiplier exceeds AllowedPad, then generate a report.
/// PadMultiplier is used to share code with the array padding
/// checker.
void visitRecord(const RecordDecl *RD, uint64_t PadMultiplier = 1) const {
if (shouldSkipDecl(RD))
return;
auto &ASTContext = RD->getASTContext();
const ASTRecordLayout &RL = ASTContext.getASTRecordLayout(RD);
assert(llvm::isPowerOf2_64(RL.getAlignment().getQuantity()));
CharUnits BaselinePad = calculateBaselinePad(RD, ASTContext, RL);
if (BaselinePad.isZero())
return;
CharUnits OptimalPad;
SmallVector<const FieldDecl *, 20> OptimalFieldsOrder;
std::tie(OptimalPad, OptimalFieldsOrder) =
calculateOptimalPad(RD, ASTContext, RL);
CharUnits DiffPad = PadMultiplier * (BaselinePad - OptimalPad);
if (DiffPad.getQuantity() <= AllowedPad) {
assert(!DiffPad.isNegative() && "DiffPad should not be negative");
// There is not enough excess padding to trigger a warning.
return;
}
reportRecord(RD, BaselinePad, OptimalPad, OptimalFieldsOrder);
}
/// \brief Look for arrays of overly padded types. If the padding of the
/// array type exceeds AllowedPad, then generate a report.
void visitVariable(const VarDecl *VD) const {
const ArrayType *ArrTy = VD->getType()->getAsArrayTypeUnsafe();
if (ArrTy == nullptr)
return;
uint64_t Elts = 0;
if (const ConstantArrayType *CArrTy = dyn_cast<ConstantArrayType>(ArrTy))
Elts = CArrTy->getSize().getZExtValue();
if (Elts == 0)
return;
const RecordType *RT = ArrTy->getElementType()->getAs<RecordType>();
if (RT == nullptr)
return;
// TODO: Recurse into the fields and base classes to see if any
// of those have excess padding.
visitRecord(RT->getDecl(), Elts);
}
bool shouldSkipDecl(const RecordDecl *RD) const {
auto Location = RD->getLocation();
// If the construct doesn't have a source file, then it's not something
// we want to diagnose.
if (!Location.isValid())
return true;
SrcMgr::CharacteristicKind Kind =
BR->getSourceManager().getFileCharacteristic(Location);
// Throw out all records that come from system headers.
if (Kind != SrcMgr::C_User)
return true;
// Not going to attempt to optimize unions.
if (RD->isUnion())
return true;
// How do you reorder fields if you haven't got any?
if (RD->field_empty())
return true;
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
// Tail padding with base classes ends up being very complicated.
// We will skip objects with base classes for now.
if (CXXRD->getNumBases() != 0)
return true;
// Virtual bases are complicated, skipping those for now.
if (CXXRD->getNumVBases() != 0)
return true;
// Can't layout a template, so skip it. We do still layout the
// instantiations though.
if (CXXRD->getTypeForDecl()->isDependentType())
return true;
if (CXXRD->getTypeForDecl()->isInstantiationDependentType())
return true;
}
auto IsTrickyField = [](const FieldDecl *FD) -> bool {
// Bitfield layout is hard.
if (FD->isBitField())
return true;
// Variable length arrays are tricky too.
QualType Ty = FD->getType();
if (Ty->isIncompleteArrayType())
return true;
return false;
};
if (std::any_of(RD->field_begin(), RD->field_end(), IsTrickyField))
return true;
return false;
}
static CharUnits calculateBaselinePad(const RecordDecl *RD,
const ASTContext &ASTContext,
const ASTRecordLayout &RL) {
CharUnits PaddingSum;
CharUnits Offset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
for (const FieldDecl *FD : RD->fields()) {
// This checker only cares about the padded size of the
// field, and not the data size. If the field is a record
// with tail padding, then we won't put that number in our
// total because reordering fields won't fix that problem.
CharUnits FieldSize = ASTContext.getTypeSizeInChars(FD->getType());
auto FieldOffsetBits = RL.getFieldOffset(FD->getFieldIndex());
CharUnits FieldOffset = ASTContext.toCharUnitsFromBits(FieldOffsetBits);
PaddingSum += (FieldOffset - Offset);
Offset = FieldOffset + FieldSize;
}
PaddingSum += RL.getSize() - Offset;
return PaddingSum;
}
/// Optimal padding overview:
/// 1. Find a close approximation to where we can place our first field.
/// This will usually be at offset 0.
/// 2. Try to find the best field that can legally be placed at the current
/// offset.
/// a. "Best" is the largest alignment that is legal, but smallest size.
/// This is to account for overly aligned types.
/// 3. If no fields can fit, pad by rounding the current offset up to the
/// smallest alignment requirement of our fields. Measure and track the
// amount of padding added. Go back to 2.
/// 4. Increment the current offset by the size of the chosen field.
/// 5. Remove the chosen field from the set of future possibilities.
/// 6. Go back to 2 if there are still unplaced fields.
/// 7. Add tail padding by rounding the current offset up to the structure
/// alignment. Track the amount of padding added.
static std::pair<CharUnits, SmallVector<const FieldDecl *, 20>>
calculateOptimalPad(const RecordDecl *RD, const ASTContext &ASTContext,
const ASTRecordLayout &RL) {
struct FieldInfo {
CharUnits Align;
CharUnits Size;
const FieldDecl *Field;
bool operator<(const FieldInfo &RHS) const {
// Order from small alignments to large alignments,
// then large sizes to small sizes.
// then large field indices to small field indices
return std::make_tuple(Align, -Size,
Field ? -static_cast<int>(Field->getFieldIndex())
: 0) <
std::make_tuple(
RHS.Align, -RHS.Size,
RHS.Field ? -static_cast<int>(RHS.Field->getFieldIndex())
: 0);
}
};
SmallVector<FieldInfo, 20> Fields;
auto GatherSizesAndAlignments = [](const FieldDecl *FD) {
FieldInfo RetVal;
RetVal.Field = FD;
auto &Ctx = FD->getASTContext();
std::tie(RetVal.Size, RetVal.Align) =
Ctx.getTypeInfoInChars(FD->getType());
assert(llvm::isPowerOf2_64(RetVal.Align.getQuantity()));
if (auto Max = FD->getMaxAlignment())
RetVal.Align = std::max(Ctx.toCharUnitsFromBits(Max), RetVal.Align);
return RetVal;
};
std::transform(RD->field_begin(), RD->field_end(),
std::back_inserter(Fields), GatherSizesAndAlignments);
llvm::sort(Fields.begin(), Fields.end());
// This lets us skip over vptrs and non-virtual bases,
// so that we can just worry about the fields in our object.
// Note that this does cause us to miss some cases where we
// could pack more bytes in to a base class's tail padding.
CharUnits NewOffset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
CharUnits NewPad;
SmallVector<const FieldDecl *, 20> OptimalFieldsOrder;
while (!Fields.empty()) {
unsigned TrailingZeros =
llvm::countTrailingZeros((unsigned long long)NewOffset.getQuantity());
// If NewOffset is zero, then countTrailingZeros will be 64. Shifting
// 64 will overflow our unsigned long long. Shifting 63 will turn
// our long long (and CharUnits internal type) negative. So shift 62.
long long CurAlignmentBits = 1ull << (std::min)(TrailingZeros, 62u);
CharUnits CurAlignment = CharUnits::fromQuantity(CurAlignmentBits);
FieldInfo InsertPoint = {CurAlignment, CharUnits::Zero(), nullptr};
auto CurBegin = Fields.begin();
auto CurEnd = Fields.end();
// In the typical case, this will find the last element
// of the vector. We won't find a middle element unless
// we started on a poorly aligned address or have an overly
// aligned field.
auto Iter = std::upper_bound(CurBegin, CurEnd, InsertPoint);
if (Iter != CurBegin) {
// We found a field that we can layout with the current alignment.
--Iter;
NewOffset += Iter->Size;
OptimalFieldsOrder.push_back(Iter->Field);
Fields.erase(Iter);
} else {
// We are poorly aligned, and we need to pad in order to layout another
// field. Round up to at least the smallest field alignment that we
// currently have.
CharUnits NextOffset = NewOffset.alignTo(Fields[0].Align);
NewPad += NextOffset - NewOffset;
NewOffset = NextOffset;
}
}
// Calculate tail padding.
CharUnits NewSize = NewOffset.alignTo(RL.getAlignment());
NewPad += NewSize - NewOffset;
return {NewPad, std::move(OptimalFieldsOrder)};
}
void reportRecord(
const RecordDecl *RD, CharUnits BaselinePad, CharUnits OptimalPad,
const SmallVector<const FieldDecl *, 20> &OptimalFieldsOrder) const {
if (!PaddingBug)
PaddingBug =
llvm::make_unique<BugType>(this, "Excessive Padding", "Performance");
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
Os << "Excessive padding in '";
Os << QualType::getAsString(RD->getTypeForDecl(), Qualifiers(),
LangOptions())
<< "'";
if (auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
// TODO: make this show up better in the console output and in
// the HTML. Maybe just make it show up in HTML like the path
// diagnostics show.
SourceLocation ILoc = TSD->getPointOfInstantiation();
if (ILoc.isValid())
Os << " instantiated here: "
<< ILoc.printToString(BR->getSourceManager());
}
Os << " (" << BaselinePad.getQuantity() << " padding bytes, where "
<< OptimalPad.getQuantity() << " is optimal). \n"
<< "Optimal fields order: \n";
for (const auto *FD : OptimalFieldsOrder)
Os << FD->getName() << ", \n";
Os << "consider reordering the fields or adding explicit padding "
"members.";
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::create(RD, BR->getSourceManager());
auto Report = llvm::make_unique<BugReport>(*PaddingBug, Os.str(), CELoc);
Report->setDeclWithIssue(RD);
Report->addRange(RD->getSourceRange());
BR->emitReport(std::move(Report));
}
};
}
void ento::registerPaddingChecker(CheckerManager &Mgr) {
Mgr.registerChecker<PaddingChecker>();
}