llvm-profdata: Indirect infrequently used fields to reduce memory usage

Examining a large profile example, it seems relatively few records have
non-empty IndirectCall and MemOP data, so indirecting these through a
unique_ptr (non-null only when they are non-empty) reduces memory usage
on this particular example from 14GB to 10GB according to valgrind's
massif.
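
As a rough illustration of where the saving comes from (a sketch only,
with simplified stand-in types rather than the actual LLVM
declarations): two inline vectors cost about six pointer-sized words
per record even when empty, while a single unique_ptr to a lazily
allocated sub-struct costs one word in the common empty case.

#include <cstdint>
#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

// Simplified stand-in for InstrProfValueSiteRecord's payload.
struct SiteRecord {
  std::vector<std::pair<std::uint64_t, std::uint64_t>> Values;
};

// Before: both value-profile vectors live inline in every record,
// costing roughly six pointer-sized words even when they are empty.
struct EagerRecord {
  std::vector<std::uint64_t> Counts;
  std::vector<SiteRecord> IndirectCallSites;
  std::vector<SiteRecord> MemOPSizes;
};

// After: the rarely used vectors move behind a unique_ptr that stays
// null until value data is actually added, costing one word otherwise.
struct IndirectedRecord {
  struct ValueProfData {
    std::vector<SiteRecord> IndirectCallSites;
    std::vector<SiteRecord> MemOPSizes;
  };
  std::vector<std::uint64_t> Counts;
  std::unique_ptr<ValueProfData> ValueData; // non-null only when non-empty
};

int main() {
  // On a typical 64-bit standard library this prints something like
  // "eager: 72 bytes, indirected: 32 bytes".
  std::printf("eager: %zu bytes, indirected: %zu bytes\n",
              sizeof(EagerRecord), sizeof(IndirectedRecord));
}

The deep-copying copy constructor and copy assignment added in the
header diff below are the price of holding the data behind a
unique_ptr.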

I suspect it'd still be worth giving InstrProfWriter its own data
structure that holds the Counts and the indirected IndirectCall+MemOP
data but omits the Name, Hash, and Error fields. That would roughly
halve the size of this dominant data structure relative to this new,
lower amount. (Name (2 words), Hash (1), Error (1) ~= Counts (vector,
3), ValueProfData (unique_ptr, 1))
-> From code review feedback: it might be better to refactor
InstrProfRecord itself to have a sub-struct with all the counts, and
use that from InstrProfWriter, rather than InstrProfWriter owning its
own data structure for this.
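
None of that follow-up is part of this commit; the sketch below is only
a hypothetical illustration of the shape the review suggestion points
at, with made-up names (SiteRecord, ProfiledCounts, Record) standing in
for the real types.

#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Simplified stand-in for InstrProfValueSiteRecord.
struct SiteRecord {
  std::vector<std::pair<std::uint64_t, std::uint64_t>> Values;
};

struct ValueProfData {
  std::vector<SiteRecord> IndirectCallSites;
  std::vector<SiteRecord> MemOPSizes;
};

// Hypothetical sub-struct holding only what the writer needs per record.
struct ProfiledCounts {
  std::vector<std::uint64_t> Counts;        // 3 words
  std::unique_ptr<ValueProfData> ValueData; // 1 word, usually null
};

// The full record wraps the sub-struct and adds the identity fields; the
// writer's dominant map could then store ProfiledCounts alone, dropping
// Name (~2 words), Hash (1 word), and the soft-error counter.
struct Record {
  std::string Name; // the real code uses StringRef (2 words)
  std::uint64_t Hash;
  ProfiledCounts Data;
};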

Reviewers: davidxl

Differential Revision: https://reviews.llvm.org/D34694

llvm-svn: 306631
David Blaikie 2017-06-29 02:51:58 +00:00
parent 1631129834
commit d16a61d2c8
2 changed files with 66 additions and 30 deletions


@@ -598,6 +598,28 @@ struct InstrProfRecord {
   InstrProfRecord() = default;
   InstrProfRecord(StringRef Name, uint64_t Hash, std::vector<uint64_t> Counts)
       : Name(Name), Hash(Hash), Counts(std::move(Counts)) {}
+  InstrProfRecord(InstrProfRecord &&) = default;
+  InstrProfRecord(const InstrProfRecord &RHS)
+      : Name(RHS.Name), Hash(RHS.Hash), Counts(RHS.Counts), SIPE(RHS.SIPE),
+        ValueData(RHS.ValueData
+                      ? llvm::make_unique<ValueProfData>(*RHS.ValueData)
+                      : nullptr) {}
+  InstrProfRecord &operator=(InstrProfRecord &&) = default;
+  InstrProfRecord &operator=(const InstrProfRecord &RHS) {
+    Name = RHS.Name;
+    Hash = RHS.Hash;
+    Counts = RHS.Counts;
+    SIPE = RHS.SIPE;
+    if (!RHS.ValueData) {
+      ValueData = nullptr;
+      return *this;
+    }
+    if (!ValueData)
+      ValueData = llvm::make_unique<ValueProfData>(*RHS.ValueData);
+    else
+      *ValueData = *RHS.ValueData;
+    return *this;
+  }
 
   using ValueMapType = std::vector<std::pair<uint64_t, uint64_t>>;
@@ -647,12 +669,9 @@ struct InstrProfRecord {
   /// Sort value profile data (per site) by count.
   void sortValueData() {
-    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) {
-      std::vector<InstrProfValueSiteRecord> &SiteRecords =
-          getValueSitesForKind(Kind);
-      for (auto &SR : SiteRecords)
+    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
+      for (auto &SR : getValueSitesForKind(Kind))
         SR.sortByCount();
-    }
   }
 
   /// Clear value data entries and edge counters.
@@ -662,36 +681,54 @@ struct InstrProfRecord {
   }
 
   /// Clear value data entries
-  void clearValueData() {
-    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
-      getValueSitesForKind(Kind).clear();
-  }
+  void clearValueData() { ValueData = nullptr; }
 
   /// Get the error contained within the record's soft error counter.
   Error takeError() { return SIPE.takeError(); }
 
 private:
-  std::vector<InstrProfValueSiteRecord> IndirectCallSites;
-  std::vector<InstrProfValueSiteRecord> MemOPSizes;
+  struct ValueProfData {
+    std::vector<InstrProfValueSiteRecord> IndirectCallSites;
+    std::vector<InstrProfValueSiteRecord> MemOPSizes;
+  };
+  std::unique_ptr<ValueProfData> ValueData;
 
-  const std::vector<InstrProfValueSiteRecord> &
+  MutableArrayRef<InstrProfValueSiteRecord>
+  getValueSitesForKind(uint32_t ValueKind) {
+    // Cast to /add/ const (should be an implicit_cast, ideally, if that's ever
+    // implemented in LLVM) to call the const overload of this function, then
+    // cast away the constness from the result.
+    auto AR = const_cast<const InstrProfRecord *>(this)->getValueSitesForKind(
+        ValueKind);
+    return makeMutableArrayRef(
+        const_cast<InstrProfValueSiteRecord *>(AR.data()), AR.size());
+  }
+  ArrayRef<InstrProfValueSiteRecord>
   getValueSitesForKind(uint32_t ValueKind) const {
+    if (!ValueData)
+      return None;
     switch (ValueKind) {
     case IPVK_IndirectCallTarget:
-      return IndirectCallSites;
+      return ValueData->IndirectCallSites;
     case IPVK_MemOPSize:
-      return MemOPSizes;
+      return ValueData->MemOPSizes;
     default:
       llvm_unreachable("Unknown value kind!");
     }
-    return IndirectCallSites;
   }
 
   std::vector<InstrProfValueSiteRecord> &
-  getValueSitesForKind(uint32_t ValueKind) {
-    return const_cast<std::vector<InstrProfValueSiteRecord> &>(
-        const_cast<const InstrProfRecord *>(this)
-            ->getValueSitesForKind(ValueKind));
+  getOrCreateValueSitesForKind(uint32_t ValueKind) {
+    if (!ValueData)
+      ValueData = llvm::make_unique<ValueProfData>();
+    switch (ValueKind) {
+    case IPVK_IndirectCallTarget:
+      return ValueData->IndirectCallSites;
+    case IPVK_MemOPSize:
+      return ValueData->MemOPSizes;
+    default:
+      llvm_unreachable("Unknown value kind!");
+    }
   }
 
   // Map indirect call target name hash to name string.
@@ -765,9 +802,9 @@ uint64_t InstrProfRecord::getValueForSite(InstrProfValueData Dest[],
 }
 
 void InstrProfRecord::reserveSites(uint32_t ValueKind, uint32_t NumValueSites) {
-  std::vector<InstrProfValueSiteRecord> &ValueSites =
-      getValueSitesForKind(ValueKind);
-  ValueSites.reserve(NumValueSites);
+  if (!NumValueSites)
+    return;
+  getOrCreateValueSitesForKind(ValueKind).reserve(NumValueSites);
 }
 
 inline support::endianness getHostEndianness() {


@@ -504,9 +504,11 @@ void InstrProfRecord::mergeValueProfData(uint32_t ValueKind,
     SIPE.addError(instrprof_error::value_site_count_mismatch);
     return;
   }
+  if (!ThisNumValueSites)
+    return;
   std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
-      getValueSitesForKind(ValueKind);
-  std::vector<InstrProfValueSiteRecord> &OtherSiteRecords =
+      getOrCreateValueSitesForKind(ValueKind);
+  MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
       Src.getValueSitesForKind(ValueKind);
   for (uint32_t I = 0; I < ThisNumValueSites; I++)
     ThisSiteRecords[I].merge(SIPE, OtherSiteRecords[I], Weight);
@@ -533,11 +535,8 @@ void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight) {
 }
 
 void InstrProfRecord::scaleValueProfData(uint32_t ValueKind, uint64_t Weight) {
-  uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
-  std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
-      getValueSitesForKind(ValueKind);
-  for (uint32_t I = 0; I < ThisNumValueSites; I++)
-    ThisSiteRecords[I].scale(SIPE, Weight);
+  for (auto &R : getValueSitesForKind(ValueKind))
+    R.scale(SIPE, Weight);
 }
 
 void InstrProfRecord::scale(uint64_t Weight) {
@@ -583,7 +582,7 @@ void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site,
       VData[I].Value = remapValue(VData[I].Value, ValueKind, ValueMap);
   }
   std::vector<InstrProfValueSiteRecord> &ValueSites =
-      getValueSitesForKind(ValueKind);
+      getOrCreateValueSitesForKind(ValueKind);
   if (N == 0)
     ValueSites.emplace_back();
   else