[BOLT][NFC] Follow LLVM variable initialization style

(cherry picked from FBD28417604)
Author: Maksim Panchenko
Date:   2021-05-13 10:50:47 -07:00
Parent: b728bfc70a
Commit: fe37f1870e

31 changed files with 175 additions and 175 deletions
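For context: the LLVM coding standards prefer assignment-style initialization (T Var = Init;) over brace initialization (T Var{Init};) for simple variables, and this NFC commit mechanically rewrites BOLT to match. A minimal before/after sketch of the pattern (a standalone illustration whose variable names echo the diff; the functions themselves are not from the patch):

#include <cstdint>

void before() {
  bool Erased{false};           // brace (list) initialization
  uint64_t Count{0};
  const char *Name{nullptr};
  (void)Erased; (void)Count; (void)Name;
}

void after() {
  bool Erased = false;          // LLVM-preferred spelling; identical codegen
  uint64_t Count = 0;
  const char *Name = nullptr;
  (void)Erased; (void)Count; (void)Name;
}

int main() { before(); after(); }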


@@ -315,7 +315,7 @@ void BinaryBasicBlock::addPredecessor(BinaryBasicBlock *Pred) {
 void BinaryBasicBlock::removePredecessor(BinaryBasicBlock *Pred,
 bool Multiple) {
 // Note: the predecessor could be listed multiple times.
-bool Erased{false};
+bool Erased = false;
 for (auto PredI = Predecessors.begin(); PredI != Predecessors.end(); ) {
 if (*PredI == Pred) {
 Erased = true;
@@ -423,7 +423,7 @@ MCInst *BinaryBasicBlock::getTerminatorBefore(MCInst *Pos) {
 BinaryContext &BC = Function->getBinaryContext();
 auto Itr = rbegin();
 bool Check = Pos ? false : true;
-MCInst *FirstTerminator{nullptr};
+MCInst *FirstTerminator = nullptr;
 while (Itr != rend()) {
 if (!Check) {
 if (&*Itr == Pos)
@@ -477,7 +477,7 @@ void BinaryBasicBlock::addTailCallInstruction(const MCSymbol *Target) {
 }
 uint32_t BinaryBasicBlock::getNumCalls() const {
-uint32_t N{0};
+uint32_t N = 0;
 BinaryContext &BC = Function->getBinaryContext();
 for (const MCInst &Instr : Instructions) {
 if (BC.MIB->isCall(Instr))

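One reason this rewrite can be tagged NFC: for scalar types the two spellings compile identically; the only semantic difference is that brace initialization rejects narrowing conversions while the = form permits them. None of the initializers in this commit narrows (they are zeros, small constants, nullptr, or values of the exact declared type), so codegen cannot change. A small illustration of that difference (hypothetical values, not from the patch):

#include <cstdint>

int main() {
  int64_t Wide = 300;   // does not fit in uint8_t
  uint8_t A = Wide;     // compiles: implicit narrowing, A becomes 44
  // uint8_t B{Wide};   // ill-formed: list initialization rejects narrowing
  return A;
}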

@@ -425,7 +425,7 @@ BinaryContext::getSubBinaryData(BinaryData *BD) {
 std::pair<const MCSymbol *, uint64_t>
 BinaryContext::handleAddressRef(uint64_t Address, BinaryFunction &BF,
 bool IsPCRel) {
-uint64_t Addend{0};
+uint64_t Addend = 0;
 if (isAArch64()) {
 // Check if this is an access to a constant island and create bookkeeping
@@ -541,10 +541,10 @@ bool BinaryContext::analyzeJumpTable(const uint64_t Address,
 const uint64_t NextJTAddress,
 JumpTable::OffsetsType *Offsets) {
 // Is one of the targets __builtin_unreachable?
-bool HasUnreachable{false};
+bool HasUnreachable = false;
 // Number of targets other than __builtin_unreachable.
-uint64_t NumRealEntries{0};
+uint64_t NumRealEntries = 0;
 constexpr uint64_t INVALID_OFFSET = std::numeric_limits<uint64_t>::max();
 auto addOffset = [&](uint64_t Offset) {
@@ -712,7 +712,7 @@ void BinaryContext::populateJumpTables() {
 if (!BF.isSimple())
 continue;
-uint64_t NextJTAddress{0};
+uint64_t NextJTAddress = 0;
 auto NextJTI = std::next(JTI);
 if (NextJTI != JTE) {
 NextJTAddress = NextJTI->second->getAddress();
@@ -818,7 +818,7 @@ BinaryContext::getOrCreateJumpTable(BinaryFunction &Function, uint64_t Address,
 }
 // Re-use the existing symbol if possible.
-MCSymbol *JTLabel{nullptr};
+MCSymbol *JTLabel = nullptr;
 if (BinaryData *Object = getBinaryDataAtAddress(Address)) {
 if (!isInternalSymbolName(Object->getSymbol()->getName()))
 JTLabel = Object->getSymbol();
@@ -905,7 +905,7 @@ bool BinaryContext::hasValidCodePadding(const BinaryFunction &BF) {
 uint64_t Offset = BF.getSize();
 MCInst Instr;
-uint64_t InstrSize{0};
+uint64_t InstrSize = 0;
 uint64_t InstrAddress = BF.getAddress() + Offset;
 using std::placeholders::_1;
@@ -947,7 +947,7 @@ bool BinaryContext::hasValidCodePadding(const BinaryFunction &BF) {
 // Some functions have a jump to the next function or to the padding area
 // inserted after the body.
 auto isSkipJump = [&](const MCInst &Instr) {
-uint64_t TargetAddress{0};
+uint64_t TargetAddress = 0;
 if (MIB->isUnconditionalBranch(Instr) &&
 MIB->evaluateBranch(Instr, InstrAddress, InstrSize, TargetAddress)) {
 if (TargetAddress >= InstrAddress + InstrSize &&
@@ -2052,7 +2052,7 @@ bool BinaryContext::validateEncoding(const MCInst &Inst,
 }
 uint64_t BinaryContext::getHotThreshold() const {
-static uint64_t Threshold{0};
+static uint64_t Threshold = 0;
 if (Threshold == 0) {
 Threshold = std::max((uint64_t)opts::ExecutionCountThreshold,
 NumProfiledFuncs ? SumExecutionCount / (2 * NumProfiledFuncs) : 1);
@@ -2093,7 +2093,7 @@ BinaryContext::getBinaryFunctionAtAddress(uint64_t Address) {
 // registered at the original address. The new function (the one that the
 // original was folded into) will hold the symbol.
 if (const BinaryData *BD = getBinaryDataAtAddress(Address)) {
-uint64_t EntryID{0};
+uint64_t EntryID = 0;
 BinaryFunction *BF = getFunctionForSymbol(BD->getSymbol(), &EntryID);
 if (BF && EntryID == 0)
 return BF;

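A subtlety in the getHotThreshold() hunk above: Threshold is a function-local static. For a scalar zero, both spellings are constant initialization performed before first use, so the rewrite stays strictly NFC even here. A compact standalone sketch of the lazy-compute pattern that code uses (the value 100 is illustrative):

#include <cstdint>

uint64_t getHotThreshold() {
  static uint64_t Threshold = 0;  // same effect as: static uint64_t Threshold{0};
  if (Threshold == 0)
    Threshold = 100;              // computed once, cached for later calls
  return Threshold;
}

int main() { return getHotThreshold() == 100 ? 0 : 1; }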

@@ -222,7 +222,7 @@ void BinaryEmitter::emitFunctions() {
 << "\" : " << Function->getFunctionNumber() << '\n');
 // Was any part of the function emitted.
-bool Emitted{false};
+bool Emitted = false;
 // Turn off Intel JCC Erratum mitigation for cold code if requested
 if (HasProfile && opts::X86AlignBranchBoundaryHotOnly &&


@@ -499,7 +499,7 @@ void BinaryFunction::print(raw_ostream &OS, std::string Annotation,
 return;
 // Offset of the instruction in function.
-uint64_t Offset{0};
+uint64_t Offset = 0;
 if (BasicBlocks.empty() && !Instructions.empty()) {
 // Print before CFG was built.
@@ -1065,7 +1065,7 @@ bool BinaryFunction::disassemble() {
 auto handlePCRelOperand =
 [&](MCInst &Instruction, uint64_t Address, uint64_t Size) {
-uint64_t TargetAddress{0};
+uint64_t TargetAddress = 0;
 if (!MIB->evaluateMemOperandTarget(Instruction, TargetAddress, Address,
 Size)) {
 errs() << "BOLT-ERROR: PC-relative operand can't be evaluated:\n";
@@ -1105,7 +1105,7 @@ bool BinaryFunction::disassemble() {
 auto fixStubTarget = [&](MCInst &LoadLowBits, MCInst &LoadHiBits,
 uint64_t Target) {
 const MCSymbol *TargetSymbol;
-uint64_t Addend{0};
+uint64_t Addend = 0;
 std::tie(TargetSymbol, Addend) = BC.handleAddressRef(Target, *this, true);
 int64_t Val;
@@ -1185,7 +1185,7 @@ bool BinaryFunction::disassemble() {
 }
 // Check if there's a relocation associated with this instruction.
-bool UsedReloc{false};
+bool UsedReloc = false;
 for (auto Itr = Relocations.lower_bound(Offset),
 ItrE = Relocations.lower_bound(Offset + Size); Itr != ItrE; ++Itr) {
 const Relocation &Relocation = Itr->second;
@@ -1370,7 +1370,7 @@ bool BinaryFunction::disassemble() {
 // Could not evaluate branch. Should be an indirect call or an
 // indirect branch. Bail out on the latter case.
 if (MIB->isIndirectBranch(Instruction)) {
-uint64_t IndirectTarget{0};
+uint64_t IndirectTarget = 0;
 IndirectBranchType Result =
 processIndirectBranch(Instruction, Size, Offset, IndirectTarget);
 switch (Result) {
@@ -1846,10 +1846,10 @@ bool BinaryFunction::postProcessIndirectBranches(
 }
 };
-uint64_t NumIndirectJumps{0};
+uint64_t NumIndirectJumps = 0;
 MCInst *LastIndirectJump = nullptr;
-BinaryBasicBlock *LastIndirectJumpBB{nullptr};
-uint64_t LastJT{0};
+BinaryBasicBlock *LastIndirectJumpBB = nullptr;
+uint64_t LastJT = 0;
 uint16_t LastJTIndexReg = BC.MIB->getNoRegister();
 for (BinaryBasicBlock *BB : layout()) {
 for (MCInst &Instr : *BB) {
@@ -2039,10 +2039,10 @@ bool BinaryFunction::buildCFG(MCPlusBuilder::AllocatorIdTy AllocatorId) {
 // Created basic blocks are sorted in layout order since they are
 // created in the same order as instructions, and instructions are
 // sorted by offsets.
-BinaryBasicBlock *InsertBB{nullptr};
-BinaryBasicBlock *PrevBB{nullptr};
-bool IsLastInstrNop{false};
-uint64_t LastInstrOffset{0};
+BinaryBasicBlock *InsertBB = nullptr;
+BinaryBasicBlock *PrevBB = nullptr;
+bool IsLastInstrNop = false;
+uint64_t LastInstrOffset = 0;
 auto addCFIPlaceholders =
 [this](uint64_t CFIOffset, BinaryBasicBlock *InsertBB) {
@@ -3939,7 +3939,7 @@ void BinaryFunction::disambiguateJumpTables(
 }
 }
-uint64_t NewJumpTableID{0};
+uint64_t NewJumpTableID = 0;
 const MCSymbol *NewJTLabel;
 std::tie(NewJumpTableID, NewJTLabel) =
 BC.duplicateJumpTable(*this, JT, Target);
@@ -3994,8 +3994,8 @@ BinaryBasicBlock *BinaryFunction::splitEdge(BinaryBasicBlock *From,
 ++BI;
 }
 assert(I != From->succ_end() && "Invalid CFG edge in splitEdge!");
-uint64_t OrigCount{BI->Count};
-uint64_t OrigMispreds{BI->MispredictedCount};
+uint64_t OrigCount = BI->Count;
+uint64_t OrigMispreds = BI->MispredictedCount;
 replaceJumpTableEntryIn(From, To, NewBBPtr);
 From->replaceSuccessor(To, NewBBPtr, OrigCount, OrigMispreds);


@@ -364,7 +364,7 @@ class RewriteInstanceDiff {
 auto Iter1 = Func1->layout_begin();
 auto Iter2 = Func2->layout_begin();
-bool Match{true};
+bool Match = true;
 std::map<const BinaryBasicBlock *, const BinaryBasicBlock *> Map;
 std::map<double, std::pair<EdgeTy, EdgeTy>> EMap;
 while (Iter1 != Func1->layout_end()) {
@@ -431,7 +431,7 @@ class RewriteInstanceDiff {
 BB2));
 }
-unsigned Printed{0};
+unsigned Printed = 0;
 setTitleColor();
 outs()
 << "\nTop " << opts::DisplayCount
@@ -462,7 +462,7 @@ class RewriteInstanceDiff {
 /// Print the largest differences in edge counts from one binary to another
 void reportHottestEdgeDiffs() {
-unsigned Printed{0};
+unsigned Printed = 0;
 setTitleColor();
 outs()
 << "\nTop " << opts::DisplayCount
@@ -511,7 +511,7 @@ class RewriteInstanceDiff {
 LTOAggregatedScore1[Iter->second] += Score;
 }
-double UnmappedScore{0};
+double UnmappedScore = 0;
 for (const auto &BFI : RI2.BC->getBinaryFunctions()) {
 const BinaryFunction &Function = BFI.second;
 bool Matched = FuncMap.find(&Function) != FuncMap.end();
@@ -561,7 +561,7 @@ class RewriteInstanceDiff {
 ScoreMap[Func2] = std::make_pair<>(Score1, Score2);
 }
-unsigned Printed{0};
+unsigned Printed = 0;
 setTitleColor();
 outs() << "\nTop " << opts::DisplayCount
 << " largest differences in performance bin 2 -> bin 1:\n";
@@ -606,7 +606,7 @@ class RewriteInstanceDiff {
 /// Print hottest functions from each binary
 void reportHottestFuncs() {
-unsigned Printed{0};
+unsigned Printed = 0;
 setTitleColor();
 outs() << "\nTop " << opts::DisplayCount
 << " hottest functions in binary 2:\n";


@@ -782,8 +782,8 @@ bool DataAggregator::doInterBranch(BinaryFunction *FromFunc,
 BinaryFunction *ToFunc, uint64_t From,
 uint64_t To, uint64_t Count,
 uint64_t Mispreds) {
-FuncBranchData *FromAggrData{nullptr};
-FuncBranchData *ToAggrData{nullptr};
+FuncBranchData *FromAggrData = nullptr;
+FuncBranchData *ToAggrData = nullptr;
 StringRef SrcFunc;
 StringRef DstFunc;
 if (FromFunc) {
@@ -963,7 +963,7 @@ bool DataAggregator::recordTrace(
 if (Branches) {
 const MCInst *Instr = BB->getLastNonPseudoInstr();
-uint64_t Offset{0};
+uint64_t Offset = 0;
 if (Instr) {
 Offset = BC.MIB->getAnnotationWithDefault<uint32_t>(*Instr, "Offset");
 } else {
@@ -1289,7 +1289,7 @@ DataAggregator::parseAggregatedLBREntry() {
 if (std::error_code EC = Frequency.getError())
 return EC;
-uint64_t Mispreds{0};
+uint64_t Mispreds = 0;
 if (Type == AggregatedLBREntry::BRANCH) {
 while (checkAndConsumeFS()) {}
 ErrorOr<int64_t> MispredsOrErr = parseNumberField(FieldSeparator, true);
@@ -1331,7 +1331,7 @@ std::error_code DataAggregator::printLBRHeatMap() {
 }
 Heatmap HM(opts::HeatmapBlock, opts::HeatmapMinAddress,
 opts::HeatmapMaxAddress);
-uint64_t NumTotalSamples{0};
+uint64_t NumTotalSamples = 0;
 while (hasData()) {
 ErrorOr<PerfBranchSample> SampleRes = parseBranchSample();
@@ -1345,7 +1345,7 @@ std::error_code DataAggregator::printLBRHeatMap() {
 // LBRs are stored in reverse execution order. NextLBR refers to the next
 // executed branch record.
-const LBREntry *NextLBR{nullptr};
+const LBREntry *NextLBR = nullptr;
 for (const LBREntry &LBR : Sample.LBR) {
 if (NextLBR) {
 // Record fall-through trace.
@@ -1402,12 +1402,12 @@ std::error_code DataAggregator::parseBranchEvents() {
 NamedRegionTimer T("parseBranch", "Parsing branch events", TimerGroupName,
 TimerGroupDesc, opts::TimeAggregator);
-uint64_t NumTotalSamples{0};
-uint64_t NumEntries{0};
-uint64_t NumSamples{0};
-uint64_t NumSamplesNoLBR{0};
-uint64_t NumTraces{0};
-bool NeedsSkylakeFix{false};
+uint64_t NumTotalSamples = 0;
+uint64_t NumEntries = 0;
+uint64_t NumSamples = 0;
+uint64_t NumSamplesNoLBR = 0;
+uint64_t NumTraces = 0;
+bool NeedsSkylakeFix = false;
 while (hasData() && NumTotalSamples < opts::MaxSamples) {
 ++NumTotalSamples;
@@ -1438,7 +1438,7 @@ std::error_code DataAggregator::parseBranchEvents() {
 // LBRs are stored in reverse execution order. NextPC refers to the next
 // recorded executed PC.
 uint64_t NextPC = opts::UseEventPC ? Sample.PC : 0;
-uint32_t NumEntry{0};
+uint32_t NumEntry = 0;
 for (const LBREntry &LBR : Sample.LBR) {
 ++NumEntry;
 // Hardware bug workaround: Intel Skylake (which has 32 LBR entries)
@@ -1560,7 +1560,7 @@ std::error_code DataAggregator::parseBranchEvents() {
 }
 outs() << "PERF2BOLT: traces mismatching disassembled function contents: "
 << NumInvalidTraces;
-float Perc{0.0f};
+float Perc = 0.0f;
 if (NumTraces > 0) {
 Perc = NumInvalidTraces * 100.0f / NumTraces;
 printColored(outs(), Perc, 5, 10);
@@ -1648,8 +1648,8 @@ void DataAggregator::processBasicEvents() {
 outs() << "PERF2BOLT: processing basic events (without LBR)...\n";
 NamedRegionTimer T("processBasic", "Processing basic events",
 TimerGroupName, TimerGroupDesc, opts::TimeAggregator);
-uint64_t OutOfRangeSamples{0};
-uint64_t NumSamples{0};
+uint64_t OutOfRangeSamples = 0;
+uint64_t NumSamples = 0;
 for (auto &Sample : BasicSamples) {
 const uint64_t PC = Sample.first;
 const uint64_t HitCount = Sample.second;
@@ -1666,7 +1666,7 @@ void DataAggregator::processBasicEvents() {
 outs() << "PERF2BOLT: out of range samples recorded in unknown regions: "
 << OutOfRangeSamples;
-float Perc{0.0f};
+float Perc = 0.0f;
 if (NumSamples > 0) {
 outs() << " (";
 Perc = OutOfRangeSamples * 100.0f / NumSamples;
@@ -1779,7 +1779,7 @@ void DataAggregator::processPreAggregated() {
 NamedRegionTimer T("processAggregated", "Processing aggregated branch events",
 TimerGroupName, TimerGroupDesc, opts::TimeAggregator);
-uint64_t NumTraces{0};
+uint64_t NumTraces = 0;
 for (const AggregatedLBREntry &AggrEntry : AggregatedLBRs) {
 switch (AggrEntry.EntryType) {
 case AggregatedLBREntry::BRANCH:
@@ -1804,7 +1804,7 @@ void DataAggregator::processPreAggregated() {
 << " aggregated LBR entries\n";
 outs() << "PERF2BOLT: traces mismatching disassembled function contents: "
 << NumInvalidTraces;
-float Perc{0.0f};
+float Perc = 0.0f;
 if (NumTraces > 0) {
 outs() << " (";
 Perc = NumInvalidTraces * 100.0f / NumTraces;
@@ -2206,8 +2206,8 @@ DataAggregator::writeAggregatedFile(StringRef OutputFilename) const {
 << FieldSeparator;
 };
-uint64_t BranchValues{0};
-uint64_t MemValues{0};
+uint64_t BranchValues = 0;
+uint64_t MemValues = 0;
 if (BAT)
 OutFile << "boltedcollection\n";


@@ -119,7 +119,7 @@ void FuncBranchData::appendFrom(const FuncBranchData &FBD, uint64_t Offset) {
 }
 uint64_t FuncBranchData::getNumExecutedBranches() const {
-uint64_t ExecutedBranches{0};
+uint64_t ExecutedBranches = 0;
 for (const BranchInfo &BI : Data) {
 int64_t BranchCount = BI.Branches;
 assert(BranchCount >= 0 && "branch execution count should not be negative");
@@ -148,7 +148,7 @@ FuncSampleData::getSamples(uint64_t Start, uint64_t End) const {
 return Val < SI.Loc.Offset;
 }
 };
-uint64_t Result{0};
+uint64_t Result = 0;
 for (auto I = std::lower_bound(Data.begin(), Data.end(), Start, Compare()),
 E = std::lower_bound(Data.begin(), Data.end(), End, Compare());
 I != E; ++I) {
@@ -327,7 +327,7 @@ Error DataReader::readProfilePreCFG(BinaryContext &BC) {
 auto &MemAccessProfile =
 BC.MIB->getOrCreateAnnotationAs<MemoryAccessProfile>(
 II->second, "MemoryAccessProfile");
-BinaryData *BD{nullptr};
+BinaryData *BD = nullptr;
 if (MI.Addr.IsSymbol) {
 BD = BC.getBinaryDataByName(MI.Addr.Name);
 }
@@ -352,7 +352,7 @@ Error DataReader::readProfile(BinaryContext &BC) {
 readProfile(Function);
 }
-uint64_t NumUnused{0};
+uint64_t NumUnused = 0;
 for (const StringMapEntry<FuncBranchData> &FuncData : NamesToBranches)
 if (!FuncData.getValue().Used)
 ++NumUnused;
@@ -513,8 +513,8 @@ bool DataReader::fetchProfileForOtherEntryPoints(BinaryFunction &BF) {
 return false;
 // Check if we are missing profiling data for secondary entry points
-bool First{true};
-bool Updated{false};
+bool First = true;
+bool Updated = false;
 for (BinaryBasicBlock *BB : BF.BasicBlocks) {
 if (First) {
 First = false;
@@ -610,7 +610,7 @@ void DataReader::readSampleData(BinaryFunction &BF) {
 BF.removeTagsFromProfile();
 bool NormalizeByInsnCount = usesEvent("cycles") || usesEvent("instructions");
 bool NormalizeByCalls = usesEvent("branches");
-static bool NagUser{true};
+static bool NagUser = true;
 if (NagUser) {
 outs()
 << "BOLT-INFO: operating with basic samples profiling data (no LBR).\n";
@@ -622,7 +622,7 @@ void DataReader::readSampleData(BinaryFunction &BF) {
 NagUser = false;
 }
 uint64_t LastOffset = BF.getSize();
-uint64_t TotalEntryCount{0};
+uint64_t TotalEntryCount = 0;
 for (auto I = BF.BasicBlockOffsets.rbegin(), E = BF.BasicBlockOffsets.rend();
 I != E; ++I) {
 uint64_t CurOffset = I->first;
@@ -690,7 +690,7 @@ void DataReader::convertBranchData(BinaryFunction &BF) const {
 IndirectCallSiteProfile &CSP =
 BC.MIB->getOrCreateAnnotationAs<IndirectCallSiteProfile>(
 *Instr, "CallProfile");
-MCSymbol *CalleeSymbol{nullptr};
+MCSymbol *CalleeSymbol = nullptr;
 if (BI.To.IsSymbol) {
 if (BinaryData *BD = BC.getBinaryDataByName(BI.To.Name)) {
 CalleeSymbol = BD->getSymbol();


@@ -184,7 +184,7 @@ void BinaryFunction::parseLSDA(ArrayRef<uint8_t> LSDASectionData,
 }
 // Create a handler entry if necessary.
-MCSymbol *LPSymbol{nullptr};
+MCSymbol *LPSymbol = nullptr;
 if (LandingPad) {
 if (Instructions.find(LandingPad) == Instructions.end()) {
 if (opts::Verbosity >= 1) {


@@ -249,7 +249,7 @@ void Heatmap::printCDF(StringRef FileName) const {
 }
 void Heatmap::printCDF(raw_ostream &OS) const {
-uint64_t NumTotalCounts{0};
+uint64_t NumTotalCounts = 0;
 std::vector<uint64_t> Counts;
 for (const std::pair<const uint64_t, uint64_t> &KV : Map) {
@@ -263,7 +263,7 @@ void Heatmap::printCDF(raw_ostream &OS) const {
 assert(NumTotalCounts > 0 &&
 "total number of heatmap buckets should be greater than 0");
 double RatioRightInPercent = 100.0 / NumTotalCounts;
-uint64_t RunningCount{0};
+uint64_t RunningCount = 0;
 OS << "Bucket counts, Size (KB), CDF (%)\n";
 for (uint64_t I = 0; I < Counts.size(); I++) {


@@ -74,7 +74,7 @@ JumpTable::getEntriesForAddress(const uint64_t Addr) const {
 bool JumpTable::replaceDestination(uint64_t JTAddress, const MCSymbol *OldDest,
 MCSymbol *NewDest) {
-bool Patched{false};
+bool Patched = false;
 const std::pair<size_t, size_t> Range = getEntriesForAddress(JTAddress);
 for (auto I = &Entries[Range.first], E = &Entries[Range.second]; I != E;
 ++I) {


@@ -123,7 +123,7 @@ void AlignerPass::alignBlocks(BinaryFunction &Function,
 const uint64_t FuncCount =
 std::max<uint64_t>(1, Function.getKnownExecutionCount());
-BinaryBasicBlock *PrevBB{nullptr};
+BinaryBasicBlock *PrevBB = nullptr;
 for (BinaryBasicBlock *BB : Function.layout()) {
 uint64_t Count = BB->getKnownExecutionCount();


@@ -75,7 +75,7 @@ void AllocCombinerPass::combineAdjustments(BinaryContext &BC,
 if (isIndifferentToSP(Inst, BC))
 continue; // Skip updating Prev
-int64_t Adjustment{0LL};
+int64_t Adjustment = 0LL;
 if (!Prev || !BC.MIB->isStackAdjustment(Inst) ||
 !BC.MIB->isStackAdjustment(*Prev) ||
 !getStackAdjustmentSize(BC, *Prev, Adjustment)) {


@@ -1277,12 +1277,12 @@ void PrintProfileStats::runOnFunctions(BinaryContext &BC) {
 void
 PrintProgramStats::runOnFunctions(BinaryContext &BC) {
-uint64_t NumRegularFunctions{0};
-uint64_t NumStaleProfileFunctions{0};
-uint64_t NumNonSimpleProfiledFunctions{0};
-uint64_t NumUnknownControlFlowFunctions{0};
-uint64_t TotalSampleCount{0};
-uint64_t StaleSampleCount{0};
+uint64_t NumRegularFunctions = 0;
+uint64_t NumStaleProfileFunctions = 0;
+uint64_t NumNonSimpleProfiledFunctions = 0;
+uint64_t NumUnknownControlFlowFunctions = 0;
+uint64_t TotalSampleCount = 0;
+uint64_t StaleSampleCount = 0;
 std::vector<BinaryFunction *> ProfiledFunctions;
 const char *StaleFuncsHeader = "BOLT-INFO: Functions with stale profile:\n";
 for (auto &BFI : BC.getBinaryFunctions()) {
@@ -1724,7 +1724,7 @@ void SpecializeMemcpy1::runOnFunctions(BinaryContext &BC) {
 BinaryBasicBlock *OneByteMemcpyBB = CurBB->splitAt(II);
-BinaryBasicBlock *NextBB{nullptr};
+BinaryBasicBlock *NextBB = nullptr;
 if (OneByteMemcpyBB->getNumNonPseudos() > 1) {
 NextBB = OneByteMemcpyBB->splitAt(OneByteMemcpyBB->begin());
 NextBB->eraseInstruction(NextBB->begin());


@@ -38,7 +38,7 @@ void CallGraphWalker::traverseCG() {
 Queue.pop();
 InQueue.erase(Func);
-bool Changed{false};
+bool Changed = false;
 for (CallbackTy Visitor : Visitors) {
 bool CurVisit = Visitor(Func);
 Changed = Changed || CurVisit;


@@ -115,10 +115,10 @@ class FrameAccessAnalysis {
 FrameIndexEntry FIE;
 bool decodeFrameAccess(const MCInst &Inst) {
-int32_t SrcImm{0};
-MCPhysReg Reg{0};
-int64_t StackOffset{0};
-bool IsIndexed{false};
+int32_t SrcImm = 0;
+MCPhysReg Reg = 0;
+int64_t StackOffset = 0;
+bool IsIndexed = false;
 if (!BC.MIB->isStackAccess(
 Inst, FIE.IsLoad, FIE.IsStore, FIE.IsStoreFromReg, Reg, SrcImm,
 FIE.StackPtrReg, StackOffset, FIE.Size, FIE.IsSimple, IsIndexed)) {


@@ -212,8 +212,8 @@ bool isIdenticalWith(const BinaryFunction &A, const BinaryFunction &B,
 return true;
 // Compare symbols as functions.
-uint64_t EntryIDA{0};
-uint64_t EntryIDB{0};
+uint64_t EntryIDA = 0;
+uint64_t EntryIDB = 0;
 const BinaryFunction *FunctionA =
 BC.getFunctionForSymbol(SymbolA, &EntryIDA);
 const BinaryFunction *FunctionB =
@@ -411,7 +411,7 @@ namespace bolt {
 void IdenticalCodeFolding::runOnFunctions(BinaryContext &BC) {
 const size_t OriginalFunctionCount = BC.getBinaryFunctions().size();
-uint64_t NumFunctionsFolded{0};
+uint64_t NumFunctionsFolded = 0;
 std::atomic<uint64_t> NumJTFunctionsFolded{0};
 std::atomic<uint64_t> BytesSavedEstimate{0};
 std::atomic<uint64_t> CallsSavedEstimate{0};


@@ -606,7 +606,7 @@ IndirectCallPromotion::findCallTargetSymbols(
 const size_t TopN = opts::IndirectCallPromotionJumpTablesTopN != 0
 ? opts::IndirectCallPromotionTopN
 : opts::IndirectCallPromotionTopN;
-size_t I{0};
+size_t I = 0;
 for (; I < HotTargets.size(); ++I) {
 const uint64_t MemAccesses = HotTargets[I].first;
 if (100 * MemAccesses <


@@ -456,7 +456,7 @@ bool Inliner::inlineCallsInFunction(BinaryFunction &Function) {
 assert(TargetSymbol && "target symbol expected for direct call");
 // Don't inline calls to a secondary entry point in a target function.
-uint64_t EntryID{0};
+uint64_t EntryID = 0;
 BinaryFunction *TargetFunction =
 BC.getFunctionForSymbol(TargetSymbol, &EntryID);
 if (!TargetFunction || EntryID != 0) {


@@ -242,7 +242,7 @@ bool Instrumentation::instrumentOneTarget(
 FunctionDescription *FuncDesc, uint32_t FromNodeID, uint32_t ToNodeID) {
 {
 auto L = FromFunction.getBinaryContext().scopeLock();
-bool Created{true};
+bool Created = true;
 if (!TargetBB)
 Created = createCallDescription(*FuncDesc, FromFunction, From, FromNodeID,
 ToFunc, ToOffset, IsInvoke);
@@ -364,7 +364,7 @@ void Instrumentation::instrumentFunction(BinaryContext &BC,
 // Determine whether this is a leaf function, which needs special
 // instructions to protect the red zone
-bool IsLeafFunction{true};
+bool IsLeafFunction = true;
 DenseSet<const BinaryBasicBlock *> InvokeBlocks;
 for (auto BBI = Function.begin(), BBE = Function.end(); BBI != BBE; ++BBI) {
 for (auto I = BBI->begin(), E = BBI->end(); I != E; ++I) {
@@ -378,9 +378,9 @@ void Instrumentation::instrumentFunction(BinaryContext &BC,
 }
 for (auto BBI = Function.begin(), BBE = Function.end(); BBI != BBE; ++BBI) {
-BinaryBasicBlock &BB{*BBI};
-bool HasUnconditionalBranch{false};
-bool HasJumpTable{false};
+BinaryBasicBlock &BB = *BBI;
+bool HasUnconditionalBranch = false;
+bool HasJumpTable = false;
 bool IsInvokeBlock = InvokeBlocks.count(&BB) > 0;
 for (auto I = BB.begin(); I != BB.end(); ++I) {
@@ -495,7 +495,7 @@ void Instrumentation::instrumentFunction(BinaryContext &BC,
 // Instrument spanning tree leaves
 if (!opts::ConservativeInstrumentation) {
 for (auto BBI = Function.begin(), BBE = Function.end(); BBI != BBE; ++BBI) {
-BinaryBasicBlock &BB{*BBI};
+BinaryBasicBlock &BB = *BBI;
 if (STOutSet[&BB].size() == 0)
 instrumentLeafNode(BC, BB, BB.begin(), IsLeafFunction, *FuncDesc,
 BBToID[&BB]);

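The BinaryBasicBlock &BB{*BBI}; lines in the hunks above are the one non-scalar case in this commit: brace initialization of a reference. Both spellings bind the reference to the same object, so the = form is an exact equivalent. A standalone sketch, using only standard C++:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> Blocks = {1, 2, 3};
  auto BBI = Blocks.begin();
  int &A{*BBI};  // list-initialized reference
  int &B = *BBI; // assignment-style: binds to the same object
  A = 42;
  assert(B == 42 && &A == &B);
  return 0;
}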

@@ -231,8 +231,8 @@ LongJmpPass::replaceTargetWithStub(BinaryBasicBlock &BB, MCInst &Inst,
 // Local branch
 if (TgtBB) {
-uint64_t OrigCount{BI.Count};
-uint64_t OrigMispreds{BI.MispredictedCount};
+uint64_t OrigCount = BI.Count;
+uint64_t OrigMispreds = BI.MispredictedCount;
 BB.replaceSuccessor(TgtBB, StubBB, OrigCount, OrigMispreds);
 StubBB->setExecutionCount(StubBB->getExecutionCount() + OrigCount);
 if (NewBB) {
@@ -281,7 +281,7 @@ void LongJmpPass::tentativeBBLayout(const BinaryFunction &Func) {
 const BinaryContext &BC = Func.getBinaryContext();
 uint64_t HotDot = HotAddresses[&Func];
 uint64_t ColdDot = ColdAddresses[&Func];
-bool Cold{false};
+bool Cold = false;
 for (BinaryBasicBlock *BB : Func.layout()) {
 if (Cold || BB->isCold()) {
 Cold = true;
@@ -424,7 +424,7 @@ uint64_t LongJmpPass::getSymbolAddress(const BinaryContext &BC,
 assert (Iter != BBAddresses.end() && "Unrecognized BB");
 return Iter->second;
 }
-uint64_t EntryID{0};
+uint64_t EntryID = 0;
 const BinaryFunction *TargetFunc = BC.getFunctionForSymbol(Target, &EntryID);
 auto Iter = HotAddresses.find(TargetFunc);
 if (Iter == HotAddresses.end() || (TargetFunc && EntryID)) {
@@ -515,7 +515,7 @@ bool LongJmpPass::needsStub(const BinaryBasicBlock &BB, const MCInst &Inst,
 bool LongJmpPass::relax(BinaryFunction &Func) {
 const BinaryContext &BC = Func.getBinaryContext();
-bool Modified{false};
+bool Modified = false;
 assert(BC.isAArch64() && "Unsupported arch");
 constexpr int InsnSize = 4; // AArch64
@@ -604,7 +604,7 @@ void LongJmpPass::runOnFunctions(BinaryContext &BC) {
 outs() << "BOLT-INFO: Starting stub-insertion pass\n";
 std::vector<BinaryFunction *> Sorted = BC.getSortedFunctions();
 bool Modified;
-uint32_t Iterations{0};
+uint32_t Iterations = 0;
 do {
 ++Iterations;
 Modified = false;


@@ -113,7 +113,7 @@ void computeEdgeWeights(BinaryBasicBlock *BB, EdgeWeightMap &EdgeWeights) {
 typedef GraphTraits<NodeT> GraphT;
 typedef GraphTraits<Inverse<NodeT> > InvTraits;
-double TotalChildrenCount{0.0};
+double TotalChildrenCount = 0.0;
 SmallVector<double, 4> ChildrenExecCount;
 // First pass computes total children execution count that directly
 // contribute to this BB.
@@ -150,7 +150,7 @@ void computeEdgeWeights(BinaryBasicBlock *BB, EdgeWeightMap &EdgeWeights) {
 TotalChildrenCount += ChildExecCount;
 }
 // Second pass fixes the weight of a possible self-reference edge
-uint32_t ChildIndex{0};
+uint32_t ChildIndex = 0;
 for (typename GraphT::ChildIteratorType CI = GraphT::child_begin(BB),
 E = GraphT::child_end(BB); CI != E; ++CI) {
 typename GraphT::NodeRef Child = *CI;
@@ -189,7 +189,7 @@ void computeEdgeWeights(BinaryFunction &BF, EdgeWeightMap &EdgeWeights) {
 /// make it match max(SumPredEdges, SumSuccEdges).
 void recalculateBBCounts(BinaryFunction &BF, bool AllEdges) {
 for (BinaryBasicBlock &BB : BF) {
-uint64_t TotalPredsEWeight{0};
+uint64_t TotalPredsEWeight = 0;
 for (BinaryBasicBlock *Pred : BB.predecessors()) {
 TotalPredsEWeight += Pred->getBranchInfo(BB).Count;
 }
@@ -201,7 +201,7 @@ void recalculateBBCounts(BinaryFunction &BF, bool AllEdges) {
 if (!AllEdges)
 continue;
-uint64_t TotalSuccsEWeight{0};
+uint64_t TotalSuccsEWeight = 0;
 for (BinaryBasicBlock::BinaryBranchInfo &BI : BB.branch_info()) {
 TotalSuccsEWeight += BI.Count;
 }
@@ -256,8 +256,8 @@ bool guessPredEdgeCounts(BinaryBasicBlock *BB, ArcSet &GuessedArcs) {
 if (BB->pred_size() == 0)
 return false;
-uint64_t TotalPredCount{0};
-unsigned NumGuessedEdges{0};
+uint64_t TotalPredCount = 0;
+unsigned NumGuessedEdges = 0;
 for (BinaryBasicBlock *Pred : BB->predecessors()) {
 if (GuessedArcs.count(std::make_pair(Pred, BB)))
 ++NumGuessedEdges;
@@ -290,8 +290,8 @@ bool guessSuccEdgeCounts(BinaryBasicBlock *BB, ArcSet &GuessedArcs) {
 if (BB->succ_size() == 0)
 return false;
-uint64_t TotalSuccCount{0};
-unsigned NumGuessedEdges{0};
+uint64_t TotalSuccCount = 0;
+unsigned NumGuessedEdges = 0;
 auto BI = BB->branch_info_begin();
 for (BinaryBasicBlock *Succ : BB->successors()) {
 if (GuessedArcs.count(std::make_pair(BB, Succ)))
@@ -328,7 +328,7 @@ bool guessSuccEdgeCounts(BinaryBasicBlock *BB, ArcSet &GuessedArcs) {
 /// change.
 void guessEdgeByIterativeApproach(BinaryFunction &BF) {
 ArcSet KnownArcs;
-bool Changed{false};
+bool Changed = false;
 do {
 Changed = false;
@@ -447,7 +447,7 @@ void equalizeBBCounts(BinaryFunction &BF) {
 }
 for (std::vector<BinaryBasicBlock *> &Class : Classes) {
-uint64_t Max{0ULL};
+uint64_t Max = 0ULL;
 for (BinaryBasicBlock *BB : Class) {
 Max = std::max(Max, BB->getExecutionCount());
 }


@@ -186,7 +186,7 @@ void ReorderData::assignMemData(BinaryContext &BC) {
 // Map of sections (or heap/stack) to count/size.
 StringMap<uint64_t> Counts;
 StringMap<uint64_t> JumpTableCounts;
-uint64_t TotalCount{0};
+uint64_t TotalCount = 0;
 for (auto &BFI : BC.getBinaryFunctions()) {
 const BinaryFunction &BF = BFI.second;
 if (!BF.hasMemoryProfile())


@@ -267,7 +267,7 @@ void StackLayoutModifier::checkStackPointerRestore(MCInst &Point) {
 // value may need to be updated depending on our stack layout changes
 const MCInstrDesc &InstInfo = BC.MII->get(Point.getOpcode());
 unsigned NumDefs = InstInfo.getNumDefs();
-bool UsesFP{false};
+bool UsesFP = false;
 for (unsigned I = NumDefs, E = MCPlus::getNumPrimeOperands(Point);
 I < E; ++I) {
 MCOperand &Operand = Point.getOperand(I);
@@ -367,8 +367,8 @@ void StackLayoutModifier::classifyStackAccesses() {
 void StackLayoutModifier::classifyCFIs() {
 std::stack<std::pair<int64_t, uint16_t>> CFIStack;
-int64_t CfaOffset{-8};
-uint16_t CfaReg{7};
+int64_t CfaOffset = -8;
+uint16_t CfaReg = 7;
 auto recordAccess = [&](MCInst *Inst, int64_t Offset) {
 const uint16_t Reg = *BC.MRI->getLLVMRegNum(CfaReg, /*isEH=*/false);
@@ -673,17 +673,17 @@ void StackLayoutModifier::performChanges() {
 BF.mutateCFIOffsetFor(Inst, CFI->getOffset() + Adjustment);
 continue;
 }
-int32_t SrcImm{0};
-MCPhysReg Reg{0};
-MCPhysReg StackPtrReg{0};
-int64_t StackOffset{0};
-bool IsIndexed{false};
-bool IsLoad{false};
-bool IsStore{false};
-bool IsSimple{false};
-bool IsStoreFromReg{false};
-uint8_t Size{0};
-bool Success{false};
+int32_t SrcImm = 0;
+MCPhysReg Reg = 0;
+MCPhysReg StackPtrReg = 0;
+int64_t StackOffset = 0;
+bool IsIndexed = false;
+bool IsLoad = false;
+bool IsStore = false;
+bool IsSimple = false;
+bool IsStoreFromReg = false;
+uint8_t Size = 0;
+bool Success = false;
 Success = BC.MIB->isStackAccess(Inst, IsLoad, IsStore, IsStoreFromReg,
 Reg, SrcImm, StackPtrReg, StackOffset,
 Size, IsSimple, IsIndexed);
@@ -897,7 +897,7 @@ bool ShrinkWrapping::isBestSavePosCold(unsigned CSR, MCInst *&BestPosSave,
 uint64_t BestCount = BestSaveCount[CSR];
 BestPosSave = BestSavePos[CSR];
-bool ShouldMove{false};
+bool ShouldMove = false;
 if (BestCount != std::numeric_limits<uint64_t>::max() &&
 BestCount < (opts::ShrinkWrappingThreshold / 100.0) * CurSavingCost) {
 LLVM_DEBUG({
@@ -983,7 +983,7 @@ ShrinkWrapping::doRestorePlacement(MCInst *BestPosSave, unsigned CSR,
 uint64_t TotalEstimatedWin) {
 SmallVector<ProgramPoint, 4> Frontier;
 SmallVector<bool, 4> IsCritEdge;
-bool CannotPlace{false};
+bool CannotPlace = false;
 DominatorAnalysis<false> &DA = Info.getDominatorAnalysis();
 SmallVector<BinaryBasicBlock *, 4> CritEdgesFrom;
@@ -1004,7 +1004,7 @@ ShrinkWrapping::doRestorePlacement(MCInst *BestPosSave, unsigned CSR,
 }
 });
 for (ProgramPoint &PP : Frontier) {
-bool HasCritEdges{false};
+bool HasCritEdges = false;
 if (PP.isInst() && BC.MIB->isTerminator(*PP.getInst()) &&
 doesInstUsesCSR(*PP.getInst(), CSR)) {
 CannotPlace = true;
@@ -1260,7 +1260,7 @@ void ShrinkWrapping::scheduleSaveRestoreInsertions(
 FrontierBB->getTerminatorBefore(PP.isInst() ? PP.getInst() : nullptr);
 if (Term)
 PP = Term;
-bool PrecededByPrefix{false};
+bool PrecededByPrefix = false;
 if (PP.isInst()) {
 auto Iter = FrontierBB->findInstruction(PP.getInst());
 if (Iter != FrontierBB->end() && Iter != FrontierBB->begin()) {
@@ -1285,15 +1285,15 @@
 }
 void ShrinkWrapping::moveSaveRestores() {
-bool DisablePushPopMode{false};
-bool UsedPushPopMode{false};
+bool DisablePushPopMode = false;
+bool UsedPushPopMode = false;
 // Keeps info about successfully moved regs: reg index, save position and
 // save size
 std::vector<std::tuple<unsigned, MCInst *, size_t>> MovedRegs;
 for (unsigned I = 0, E = BC.MRI->getNumRegs(); I != E; ++I) {
-MCInst *BestPosSave{nullptr};
-uint64_t TotalEstimatedWin{0};
+MCInst *BestPosSave = nullptr;
+uint64_t TotalEstimatedWin = 0;
 if (!isBestSavePosCold(I, BestPosSave, TotalEstimatedWin))
 continue;
 SmallVector<ProgramPoint, 4> RestorePoints =
@@ -1421,7 +1421,7 @@ bool isIdenticalSplitEdgeBB(const BinaryContext &BC,
 }
 bool ShrinkWrapping::foldIdenticalSplitEdges() {
-bool Changed{false};
+bool Changed = false;
 for (auto Iter = BF.begin(); Iter != BF.end(); ++Iter) {
 BinaryBasicBlock &BB = *Iter;
 if (!BB.getName().startswith(".LSplitEdge"))
@@ -1436,8 +1436,8 @@ bool ShrinkWrapping::foldIdenticalSplitEdges() {
 continue;
 assert(RBB.pred_size() == 1 && "Invalid split edge BB");
 BinaryBasicBlock *Pred = *RBB.pred_begin();
-uint64_t OrigCount{Pred->branch_info_begin()->Count};
-uint64_t OrigMispreds{Pred->branch_info_begin()->MispredictedCount};
+uint64_t OrigCount = Pred->branch_info_begin()->Count;
+uint64_t OrigMispreds = Pred->branch_info_begin()->MispredictedCount;
 BF.replaceJumpTableEntryIn(Pred, &RBB, &BB);
 Pred->replaceSuccessor(&RBB, &BB, OrigCount, OrigMispreds);
 Changed = true;
@@ -1535,20 +1535,20 @@ public:
 void ShrinkWrapping::insertUpdatedCFI(unsigned CSR, int SPValPush,
 int SPValPop) {
-MCInst *SavePoint{nullptr};
+MCInst *SavePoint = nullptr;
 for (BinaryBasicBlock &BB : BF) {
 for (auto InstIter = BB.rbegin(), EndIter = BB.rend(); InstIter != EndIter;
 ++InstIter) {
-int32_t SrcImm{0};
-MCPhysReg Reg{0};
-MCPhysReg StackPtrReg{0};
-int64_t StackOffset{0};
-bool IsIndexed{false};
-bool IsLoad{false};
-bool IsStore{false};
-bool IsSimple{false};
-bool IsStoreFromReg{false};
-uint8_t Size{0};
+int32_t SrcImm = 0;
+MCPhysReg Reg = 0;
+MCPhysReg StackPtrReg = 0;
+int64_t StackOffset = 0;
+bool IsIndexed = false;
+bool IsLoad = false;
+bool IsStore = false;
+bool IsSimple = false;
+bool IsStoreFromReg = false;
+uint8_t Size = 0;
 if (!BC.MIB->isStackAccess(*InstIter, IsLoad, IsStore, IsStoreFromReg,
 Reg, SrcImm, StackPtrReg, StackOffset,
 Size, IsSimple, IsIndexed))
@@ -1566,8 +1566,8 @@ void ShrinkWrapping::insertUpdatedCFI(unsigned CSR, int SPValPush,
 dbgs() << "Now using as save point for reg " << CSR << " :";
 SavePoint->dump();
 });
-bool PrevAffectedZone{false};
-BinaryBasicBlock *PrevBB{nullptr};
+bool PrevAffectedZone = false;
+BinaryBasicBlock *PrevBB = nullptr;
 DominatorAnalysis<false> &DA = Info.getDominatorAnalysis();
 for (BinaryBasicBlock *BB : BF.layout()) {
 if (BB->size() == 0)
@@ -1623,8 +1623,8 @@ void ShrinkWrapping::rebuildCFIForSP() {
 }
 }
-int PrevSPVal{-8};
-BinaryBasicBlock *PrevBB{nullptr};
+int PrevSPVal = -8;
+BinaryBasicBlock *PrevBB = nullptr;
 StackPointerTracking &SPT = Info.getStackPointerTracking();
 for (BinaryBasicBlock *BB : BF.layout()) {
 if (BB->size() == 0)
@@ -1812,7 +1812,7 @@ BBIterTy ShrinkWrapping::processInsertion(BBIterTy InsertionPoint,
 BBIterTy ShrinkWrapping::processInsertionsList(
 BBIterTy InsertionPoint, BinaryBasicBlock *CurBB,
 std::vector<WorklistItem> &TodoList, int64_t SPVal, int64_t FPVal) {
-bool HasInsertions{false};
+bool HasInsertions = false;
 for (WorklistItem &Item : TodoList) {
 if (Item.Action == WorklistItem::Erase ||
 Item.Action == WorklistItem::ChangeToAdjustment)
@@ -1881,7 +1881,7 @@ bool ShrinkWrapping::processInsertions() {
 PredictiveStackPointerTracking PSPT(BC, BF, Todo, Info, AllocatorId);
 PSPT.run();
-bool Changes{false};
+bool Changes = false;
 for (BinaryBasicBlock &BB : BF) {
 // Process insertions before some inst.
 for (auto I = BB.begin(); I != BB.end(); ++I) {


@@ -134,7 +134,7 @@ bool ValidateInternalCalls::fixCFGForIC(BinaryFunction &Function) const {
 // We use the InsnToBB map that DataflowInfoManager provides us
 DataflowInfoManager Info(BC, Function, nullptr, nullptr);
-bool Updated{false};
+bool Updated = false;
 auto processReturns = [&] (BinaryBasicBlock &BB, MCInst &Return) {
 // Check all reaching internal calls
@@ -217,10 +217,10 @@ bool ValidateInternalCalls::analyzeFunction(BinaryFunction &Function) const {
 }
 FrameIndexEntry FIE;
-int32_t SrcImm{0};
-MCPhysReg Reg{0};
-int64_t StackOffset{0};
-bool IsIndexed{false};
+int32_t SrcImm = 0;
+MCPhysReg Reg = 0;
+int64_t StackOffset = 0;
+bool IsIndexed = false;
 MCInst *TargetInst = ProgramPoint::getFirstPointAt(*Target).getInst();
 if (!BC.MIB->isStackAccess(*TargetInst, FIE.IsLoad, FIE.IsStore,
 FIE.IsStoreFromReg, Reg, SrcImm,
@@ -248,7 +248,7 @@ bool ValidateInternalCalls::analyzeFunction(BinaryFunction &Function) const {
 RU.run();
 int64_t Offset = static_cast<int64_t>(Target->getInputOffset());
-bool UseDetected{false};
+bool UseDetected = false;
 for (auto I = RU.expr_begin(*RU.getStateBefore(*TargetInst)),
 E = RU.expr_end();
 I != E; ++I) {


@@ -385,7 +385,7 @@ size_t Relocation::emit(MCStreamer *Streamer) const {
 if (isPCRelative(Type)) {
 MCSymbol *TempLabel = Ctx.createNamedTempSymbol();
 Streamer->emitLabel(TempLabel);
-const MCExpr *Value{nullptr};
+const MCExpr *Value = nullptr;
 if (Symbol) {
 Value = MCSymbolRefExpr::create(Symbol, Ctx);
 if (Addend) {


@@ -1143,7 +1143,7 @@ void RewriteInstance::discoverFileObjects() {
 }
 }
-BinaryFunction *BF{nullptr};
+BinaryFunction *BF = nullptr;
 // Since function may not have yet obtained its real size, do a search
 // using the list of registered functions instead of calling
 // getBinaryFunctionAtAddress().
@@ -1365,7 +1365,7 @@ void RewriteInstance::adjustFunctionBoundaries() {
 BFE = BC->getBinaryFunctions().end();
 BFI != BFE; ++BFI) {
 BinaryFunction &Function = BFI->second;
-const BinaryFunction *NextFunction{nullptr};
+const BinaryFunction *NextFunction = nullptr;
 if (std::next(BFI) != BFE)
 NextFunction = &std::next(BFI)->second;
@@ -2373,7 +2373,7 @@ void RewriteInstance::readRelocations(const SectionRef &Section) {
 BC->getBinaryFunctionAtAddress(Address + 1)) {
 // Do an extra check that the function was referenced previously.
 // It's a linear search, but it should rarely happen.
-bool Found{false};
+bool Found = false;
 for (const auto &RelKV : ContainingBF->Relocations) {
 const Relocation &Rel = RelKV.second;
 if (Rel.Symbol == RogueBF->getSymbol() &&
@@ -3270,7 +3270,7 @@ void RewriteInstance::mapCodeSections(RuntimeDyld &RTDyld) {
 dbgs() << Section->getName() << '\n';
 });
-uint64_t PaddingSize{0}; // size of padding required at the end
+uint64_t PaddingSize = 0; // size of padding required at the end
 // Allocate sections starting at a given Address.
 auto allocateAt = [&](uint64_t Address) {
@@ -3294,7 +3294,7 @@ void RewriteInstance::mapCodeSections(RuntimeDyld &RTDyld) {
 };
 // Check if we can fit code in the original .text
-bool AllocationDone{false};
+bool AllocationDone = false;
 if (opts::UseOldText) {
 const uint64_t CodeSize =
 allocateAt(BC->OldTextSectionAddress) - BC->OldTextSectionAddress;
@@ -4989,7 +4989,7 @@ void RewriteInstance::writeEHFrameHeader() {
 BC->AsmInfo->getCodePointerSize()));
 check_error(std::move(E), "failed to parse EH frame");
-uint64_t OldEHFrameAddress{0};
+uint64_t OldEHFrameAddress = 0;
 StringRef OldEHFrameContents;
 ErrorOr<BinarySection &> OldEHFrameSection =
 BC->getUniqueSectionByName(Twine(getOrgSecPrefix(), ".eh_frame").str());


@@ -273,7 +273,7 @@ public:
 uint64_t Address,
 uint64_t Size) const override {
 int64_t DispValue;
-const MCExpr* DispExpr{nullptr};
+const MCExpr *DispExpr = nullptr;
 if (!evaluateAArch64MemoryOperand(Inst, DispValue, &DispExpr))
 return false;

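Note that the DispExpr line in the hunk above (and the first X86 hunk that follows) also fixes declarator spacing: LLVM style attaches the * to the variable name (const MCExpr *DispExpr), not to the type. The spacing is purely cosmetic, but the name-attached form reads correctly when one declaration introduces several pointers. A tiny sketch:

int main() {
  const char* P1 = nullptr;               // star attached to the type
  const char *P2 = nullptr;               // LLVM style: star attached to the name
  const char *Q = nullptr, *R = nullptr;  // '*' must be repeated per declarator
  return (P1 == P2 && Q == R) ? 0 : 1;
}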

@@ -1031,7 +1031,7 @@ public:
 unsigned IndexRegNum;
 int64_t DispValue;
 unsigned SegRegNum;
-const MCExpr* DispExpr{nullptr};
+const MCExpr *DispExpr = nullptr;
 if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue, &IndexRegNum,
 &DispValue, &SegRegNum, &DispExpr)) {
 return false;
@@ -1610,7 +1610,7 @@ public:
 unsigned IndexRegNum;
 int64_t DispValue;
 unsigned SegRegNum;
-const MCExpr *DispExpr{nullptr};
+const MCExpr *DispExpr = nullptr;
 if (!evaluateX86MemoryOperand(Inst, &BaseRegNum, &ScaleValue,
 &IndexRegNum, &DispValue, &SegRegNum,
 &DispExpr)) {
@@ -1898,7 +1898,7 @@ public:
 // Modify the instruction.
 MCOperand ImmOp = MCOperand::createImm(ImmVal);
-uint32_t TargetOpNum{0};
+uint32_t TargetOpNum = 0;
 // Test instruction does not follow the regular pattern of putting the
 // memory reference of a load (5 MCOperands) last in the list of operands.
 // Since it is not modifying the register operand, it is not treated as
@@ -3197,7 +3197,7 @@ public:
 // Check if the target address expression used in the original indirect call
 // uses the stack pointer, which we are going to clobber.
 static BitVector SPAliases(getAliases(X86::RSP));
-bool UsesSP{false};
+bool UsesSP = false;
 // Skip defs.
 for (unsigned I = Info->get(CallInst.getOpcode()).getNumDefs(),
 E = MCPlus::getNumPrimeOperands(CallInst); I != E; ++I) {


@@ -361,7 +361,7 @@ Error YAMLProfileReader::readProfile(BinaryContext &BC) {
 if (I == LTOCommonNameMap.end())
 continue;
-bool ProfileMatched{false};
+bool ProfileMatched = false;
 std::vector<yaml::bolt::BinaryFunctionProfile *> &LTOProfiles =
 I->getValue();
 for (yaml::bolt::BinaryFunctionProfile *YamlBF : LTOProfiles) {
@@ -408,7 +408,7 @@ Error YAMLProfileReader::readProfile(BinaryContext &BC) {
 NormalizeByInsnCount = usesEvent("cycles") || usesEvent("instructions");
 NormalizeByCalls = usesEvent("branches");
-uint64_t NumUnused{0};
+uint64_t NumUnused = 0;
 for (yaml::bolt::BinaryFunctionProfile &YamlBF : YamlBP.Functions) {
 if (YamlBF.Id >= YamlProfileToFunction.size()) {
 // Such profile was ignored.


@@ -81,7 +81,7 @@ convert(const BinaryFunction &BF, yaml::bolt::BinaryFunctionProfile &YamlBF) {
 YamlBB.CallSites.push_back(CSI);
 }
 } else { // direct call or a tail call
-uint64_t EntryID{0};
+uint64_t EntryID = 0;
 const MCSymbol *CalleeSymbol = BC.MIB->getTargetSymbol(Instr);
 const BinaryFunction *const Callee =
 BC.getFunctionForSymbol(CalleeSymbol, &EntryID);


@@ -243,8 +243,8 @@ bool isYAML(const StringRef Filename) {
 void mergeLegacyProfiles(const cl::list<std::string> &Filenames) {
 errs() << "Using legacy profile format.\n";
-bool BoltedCollection{false};
-bool First{true};
+bool BoltedCollection = false;
+bool First = true;
 for (const std::string &Filename : Filenames) {
 if (isYAML(Filename))
 report_error(Filename, "cannot mix YAML and legacy formats");