llvm-project/llvm/lib/ProfileData/SampleProfReader.cpp


//===- SampleProfReader.cpp - Read LLVM sample profile data ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the class that reads LLVM sample profiles. It
// supports three file formats: text, binary and gcov.
//
// The textual representation is useful for debugging and testing purposes. The
// binary representation is more compact, resulting in smaller file sizes.
//
// The gcov encoding is the one generated by GCC's AutoFDO profile creation
// tool (https://github.com/google/autofdo)
//
// All three encodings can be used interchangeably as an input sample profile.
//
//===----------------------------------------------------------------------===//
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/ProfileCommon.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <system_error>
#include <vector>

using namespace llvm;
using namespace sampleprof;

#define DEBUG_TYPE "samplepgo-reader"

// This internal option specifies if the profile uses FS discriminators.
// It only applies to text, binary and compact binary format profiles.
// For ext-binary format profiles, the flag is set in the summary.
static cl::opt<bool> ProfileIsFSDisciminator(
    "profile-isfs", cl::Hidden, cl::init(false),
    cl::desc("Profile uses flow sensitive discriminators"));

/// Dump the function profile for \p FName.
///
/// \param FContext Name + context of the function to print.
/// \param OS Stream to emit the output to.
void SampleProfileReader::dumpFunctionProfile(SampleContext FContext,
                                              raw_ostream &OS) {
  OS << "Function: " << FContext.toString() << ": " << Profiles[FContext];
}

/// Dump all the function profiles found on stream \p OS.
void SampleProfileReader::dump(raw_ostream &OS) {
  std::vector<NameFunctionSamples> V;
  sortFuncProfiles(Profiles, V);
  for (const auto &I : V)
    dumpFunctionProfile(I.first, OS);
}

/// Parse \p Input as function head.
///
/// Parse one line of \p Input, and update function name in \p FName,
/// function's total sample count in \p NumSamples, function's entry
/// count in \p NumHeadSamples.
///
/// \returns true if parsing is successful.
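///
/// For reference, a function header line in the text format looks like this
/// (counts are illustrative):
///
///   main:184019:100
///
/// i.e. <function name>:<total samples>:<entry samples>.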
static bool ParseHead(const StringRef &Input, StringRef &FName,
                      uint64_t &NumSamples, uint64_t &NumHeadSamples) {
  if (Input[0] == ' ')
    return false;
  size_t n2 = Input.rfind(':');
  size_t n1 = Input.rfind(':', n2 - 1);
  FName = Input.substr(0, n1);
  if (Input.substr(n1 + 1, n2 - n1 - 1).getAsInteger(10, NumSamples))
    return false;
  if (Input.substr(n2 + 1).getAsInteger(10, NumHeadSamples))
    return false;
  return true;
}

/// Returns true if line offset \p L is legal (only has 16 bits).
static bool isOffsetLegal(unsigned L) { return (L & 0xffff) == L; }
/// Parse \p Input that contains metadata.
/// Possible metadata:
/// - CFG Checksum information:
///     !CFGChecksum: 12345
/// - Attribute information:
///     !Attributes: 1
/// Stores the FunctionHash (a.k.a. CFG Checksum) into \p FunctionHash.
static bool parseMetadata(const StringRef &Input, uint64_t &FunctionHash,
                          uint32_t &Attributes) {
  if (Input.startswith("!CFGChecksum:")) {
    StringRef CFGInfo = Input.substr(strlen("!CFGChecksum:")).trim();
    return !CFGInfo.getAsInteger(10, FunctionHash);
  }

  if (Input.startswith("!Attributes:")) {
    StringRef Attrib = Input.substr(strlen("!Attributes:")).trim();
    return !Attrib.getAsInteger(10, Attributes);
  }

  return false;
}
enum class LineType {
  CallSiteProfile,
  BodyProfile,
  Metadata,
};
/// Parse \p Input as line sample.
///
/// \param Input input line.
/// \param LineTy Type of this line.
/// \param Depth the depth of the inline stack.
/// \param NumSamples total samples of the line/inlined callsite.
/// \param LineOffset line offset to the start of the function.
/// \param Discriminator discriminator of the line.
/// \param TargetCountMap map from indirect call target to count.
/// \param FunctionHash the function's CFG hash, used by pseudo probe.
///
/// \returns true if parsing is successful.
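///
/// Illustrative inputs for each line type (all counts made up); the leading
/// spaces encode the inline-stack depth:
///   body sample:      " 2.1: 200 _Z3fooi:100 _Z3bari:80"
///   callsite profile: " 3: _Z3fooi:2000"
///   metadata:         " !CFGChecksum: 12345"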
static bool ParseLine(const StringRef &Input, LineType &LineTy, uint32_t &Depth,
                      uint64_t &NumSamples, uint32_t &LineOffset,
                      uint32_t &Discriminator, StringRef &CalleeName,
                      DenseMap<StringRef, uint64_t> &TargetCountMap,
                      uint64_t &FunctionHash, uint32_t &Attributes) {
  for (Depth = 0; Input[Depth] == ' '; Depth++)
    ;
  if (Depth == 0)
    return false;

  if (Input[Depth] == '!') {
    LineTy = LineType::Metadata;
    return parseMetadata(Input.substr(Depth), FunctionHash, Attributes);
  }

  size_t n1 = Input.find(':');
  StringRef Loc = Input.substr(Depth, n1 - Depth);
  size_t n2 = Loc.find('.');
  if (n2 == StringRef::npos) {
    if (Loc.getAsInteger(10, LineOffset) || !isOffsetLegal(LineOffset))
      return false;
    Discriminator = 0;
  } else {
    if (Loc.substr(0, n2).getAsInteger(10, LineOffset))
      return false;
    if (Loc.substr(n2 + 1).getAsInteger(10, Discriminator))
      return false;
  }
  StringRef Rest = Input.substr(n1 + 2);
  if (isDigit(Rest[0])) {
    LineTy = LineType::BodyProfile;
    size_t n3 = Rest.find(' ');
    if (n3 == StringRef::npos) {
      if (Rest.getAsInteger(10, NumSamples))
        return false;
    } else {
      if (Rest.substr(0, n3).getAsInteger(10, NumSamples))
        return false;
    }
    // Find call targets and their sample counts.
    // Note: In some cases, there are symbols in the profile which are not
    // mangled. To accommodate such cases, use colon + integer pairs as the
    // anchor points.
    // An example:
    //    _M_construct<char *>:1000 string_view<std::allocator<char> >:437
    // ":1000" and ":437" are used as anchor points so the string above will
    // be interpreted as
    //    target: _M_construct<char *>
    //    count: 1000
    //    target: string_view<std::allocator<char> >
    //    count: 437
    while (n3 != StringRef::npos) {
      n3 += Rest.substr(n3).find_first_not_of(' ');
      Rest = Rest.substr(n3);
      n3 = Rest.find_first_of(':');
      if (n3 == StringRef::npos || n3 == 0)
        return false;
      StringRef Target;
      uint64_t count, n4;
      while (true) {
        // Get the segment after the current colon.
        StringRef AfterColon = Rest.substr(n3 + 1);
        // Get the target symbol before the current colon.
        Target = Rest.substr(0, n3);
        // Check if the word after the current colon is an integer.
        n4 = AfterColon.find_first_of(' ');
        n4 = (n4 != StringRef::npos) ? n3 + n4 + 1 : Rest.size();
        StringRef WordAfterColon = Rest.substr(n3 + 1, n4 - n3 - 1);
        if (!WordAfterColon.getAsInteger(10, count))
          break;
        // Try to find the next colon.
        uint64_t n5 = AfterColon.find_first_of(':');
        if (n5 == StringRef::npos)
          return false;
        n3 += n5 + 1;
      }
      // An anchor point is found. Save the {target, count} pair.
      TargetCountMap[Target] = count;
      if (n4 == Rest.size())
        break;
      // Change n3 to the next blank space after colon + integer pair.
      n3 = n4;
    }
  } else {
    LineTy = LineType::CallSiteProfile;
    size_t n3 = Rest.find_last_of(':');
    CalleeName = Rest.substr(0, n3);
    if (Rest.substr(n3 + 1).getAsInteger(10, NumSamples))
      return false;
  }
  return true;
}
/// Load samples from a text file.
///
/// See the documentation at the top of the file for an explanation of
/// the expected format.
///
/// \returns true if the file was loaded successfully, false otherwise.
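///
/// A small illustrative profile (all counts are made up); one leading space
/// per inlining level:
///
///   main:3200:10
///    1: 100
///    2.1: 300 _Z3fooi:200
///    3: _Z3fooi:1500
///     1: 1500
///
/// Metadata lines (e.g. !CFGChecksum) may follow a function's sample lines.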
std::error_code SampleProfileReaderText::readImpl() {
  line_iterator LineIt(*Buffer, /*SkipBlanks=*/true, '#');
  sampleprof_error Result = sampleprof_error::success;
  InlineCallStack InlineStack;
  uint32_t TopLevelProbeProfileCount = 0;

  // DepthMetadata tracks whether we have processed metadata for the current
  // top-level or nested function profile.
  uint32_t DepthMetadata = 0;

  ProfileIsFS = ProfileIsFSDisciminator;
  FunctionSamples::ProfileIsFS = ProfileIsFS;
  for (; !LineIt.is_at_eof(); ++LineIt) {
    if ((*LineIt)[(*LineIt).find_first_not_of(' ')] == '#')
      continue;

    // Read the header of each function.
    //
    // Note that for function identifiers we are actually expecting
    // mangled names, but we may not always get them. This happens when
    // the compiler decides not to emit the function (e.g., it was inlined
    // and removed). In this case, the binary will not have the linkage
    // name for the function, so the profiler will emit the function's
    // unmangled name, which may contain characters like ':' and '>' in its
    // name (member functions, templates, etc).
    //
    // The only requirement we place on the identifier, then, is that it
    // should not begin with a number.
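    //
    // For example (illustrative), a header such as
    //   foo<int>::bar(int):1230:50
    // still parses, because ParseHead splits on the last two ':' characters.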
    if ((*LineIt)[0] != ' ') {
      uint64_t NumSamples, NumHeadSamples;
      StringRef FName;
      if (!ParseHead(*LineIt, FName, NumSamples, NumHeadSamples)) {
        reportError(LineIt.line_number(),
                    "Expected 'mangled_name:NUM:NUM', found " + *LineIt);
        return sampleprof_error::malformed;
      }
      DepthMetadata = 0;
      SampleContext FContext(FName, CSNameTable);
      if (FContext.hasContext())
        ++CSProfileCount;
      Profiles[FContext] = FunctionSamples();
      FunctionSamples &FProfile = Profiles[FContext];
      FProfile.setContext(FContext);
      MergeResult(Result, FProfile.addTotalSamples(NumSamples));
      MergeResult(Result, FProfile.addHeadSamples(NumHeadSamples));
      InlineStack.clear();
      InlineStack.push_back(&FProfile);
    } else {
      uint64_t NumSamples;
      StringRef FName;
      DenseMap<StringRef, uint64_t> TargetCountMap;
      uint32_t Depth, LineOffset, Discriminator;
LineType LineTy;
uint64_t FunctionHash = 0;
uint32_t Attributes = 0;
if (!ParseLine(*LineIt, LineTy, Depth, NumSamples, LineOffset,
Discriminator, FName, TargetCountMap, FunctionHash,
Attributes)) {
reportError(LineIt.line_number(),
"Expected 'NUM[.NUM]: NUM[ mangled_name:NUM]*', found " +
*LineIt);
return sampleprof_error::malformed;
}
if (LineTy != LineType::Metadata && Depth == DepthMetadata) {
// Metadata must be put at the end of a function profile.
reportError(LineIt.line_number(),
"Found non-metadata after metadata: " + *LineIt);
return sampleprof_error::malformed;
}
// Here we handle FS discriminators.
Discriminator &= getDiscriminatorMask();
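// Pop the inline stack until it matches the current nesting depth, so that
// InlineStack.back() refers to the enclosing (parent) function profile.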
while (InlineStack.size() > Depth) {
InlineStack.pop_back();
}
switch (LineTy) {
case LineType::CallSiteProfile: {
FunctionSamples &FSamples = InlineStack.back()->functionSamplesAt(
LineLocation(LineOffset, Discriminator))[std::string(FName)];
FSamples.setName(FName);
MergeResult(Result, FSamples.addTotalSamples(NumSamples));
InlineStack.push_back(&FSamples);
DepthMetadata = 0;
break;
}
case LineType::BodyProfile: {
while (InlineStack.size() > Depth) {
InlineStack.pop_back();
}
FunctionSamples &FProfile = *InlineStack.back();
for (const auto &name_count : TargetCountMap) {
MergeResult(Result, FProfile.addCalledTargetSamples(
LineOffset, Discriminator, name_count.first,
name_count.second));
}
MergeResult(Result, FProfile.addBodySamples(LineOffset, Discriminator,
NumSamples));
break;
}
case LineType::Metadata: {
FunctionSamples &FProfile = *InlineStack.back();
if (FunctionHash) {
FProfile.setFunctionHash(FunctionHash);
if (Depth == 1)
++TopLevelProbeProfileCount;
}
FProfile.getContext().setAllAttributes(Attributes);
if (Attributes & (uint32_t)ContextShouldBeInlined)
ProfileIsCSNested = true;
DepthMetadata = Depth;
break;
}
}
}
}
assert((CSProfileCount == 0 || CSProfileCount == Profiles.size()) &&
"Cannot have both context-sensitive and regular profile");
ProfileIsCSFlat = (CSProfileCount > 0);
assert((TopLevelProbeProfileCount == 0 ||
TopLevelProbeProfileCount == Profiles.size()) &&
"Cannot have both probe-based profiles and regular profiles");
ProfileIsProbeBased = (TopLevelProbeProfileCount > 0);
FunctionSamples::ProfileIsProbeBased = ProfileIsProbeBased;
FunctionSamples::ProfileIsCSFlat = ProfileIsCSFlat;
FunctionSamples::ProfileIsCSNested = ProfileIsCSNested;
if (Result == sampleprof_error::success)
computeSummary();
return Result;
}
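/// Return true if \p Buffer holds a text-format sample profile, i.e. its
/// first non-comment line parses as a function header.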
bool SampleProfileReaderText::hasFormat(const MemoryBuffer &Buffer) {
bool result = false;
// Check that the first non-comment line is a valid function header.
line_iterator LineIt(Buffer, /*SkipBlanks=*/true, '#');
if (!LineIt.is_at_eof()) {
if ((*LineIt)[0] != ' ') {
uint64_t NumSamples, NumHeadSamples;
StringRef FName;
result = ParseHead(*LineIt, FName, NumSamples, NumHeadSamples);
}
}
return result;
}
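/// Read a ULEB128-encoded integer, verifying that the decoded value fits in
/// \p T and that the read does not run past the end of the buffer.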
template <typename T> ErrorOr<T> SampleProfileReaderBinary::readNumber() {
unsigned NumBytesRead = 0;
std::error_code EC;
uint64_t Val = decodeULEB128(Data, &NumBytesRead);
if (Val > std::numeric_limits<T>::max())
EC = sampleprof_error::malformed;
else if (Data + NumBytesRead > End)
EC = sampleprof_error::truncated;
else
EC = sampleprof_error::success;
if (EC) {
reportError(0, EC.message());
return EC;
}
Data += NumBytesRead;
return static_cast<T>(Val);
}
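/// Read a null-terminated string, advancing the read cursor past the
/// terminating NUL.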
ErrorOr<StringRef> SampleProfileReaderBinary::readString() {
std::error_code EC;
StringRef Str(reinterpret_cast<const char *>(Data));
if (Data + Str.size() + 1 > End) {
EC = sampleprof_error::truncated;
reportError(0, EC.message());
return EC;
}
Data += Str.size() + 1;
return Str;
}
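/// Read a fixed-width little-endian integer that was stored without
/// ULEB128 encoding.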
template <typename T>
ErrorOr<T> SampleProfileReaderBinary::readUnencodedNumber() {
std::error_code EC;
if (Data + sizeof(T) > End) {
EC = sampleprof_error::truncated;
reportError(0, EC.message());
return EC;
}
using namespace support;
T Val = endian::readNext<T, little, unaligned>(Data);
return Val;
}
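/// Read an index into the given name table and verify that it is in range.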
template <typename T>
inline ErrorOr<uint32_t> SampleProfileReaderBinary::readStringIndex(T &Table) {
std::error_code EC;
auto Idx = readNumber<uint32_t>();
if (std::error_code EC = Idx.getError())
return EC;
if (*Idx >= Table.size())
return sampleprof_error::truncated_name_table;
return *Idx;
}
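/// Read a name-table index and return the name it refers to.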
ErrorOr<StringRef> SampleProfileReaderBinary::readStringFromTable() {
auto Idx = readStringIndex(NameTable);
if (std::error_code EC = Idx.getError())
return EC;
return NameTable[*Idx];
}
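/// Read a function name from the name table and wrap it in a SampleContext.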
ErrorOr<SampleContext> SampleProfileReaderBinary::readSampleContextFromTable() {
auto FName(readStringFromTable());
if (std::error_code EC = FName.getError())
return EC;
return SampleContext(*FName);
}
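/// Read a string from the name table. For fixed-length MD5 name tables, the
/// MD5 value is read lazily from the MD5 section on first access and cached
/// as a string.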
ErrorOr<StringRef> SampleProfileReaderExtBinaryBase::readStringFromTable() {
if (!FixedLengthMD5)
return SampleProfileReaderBinary::readStringFromTable();
// Read the NameTable index.
auto Idx = readStringIndex(NameTable);
if (std::error_code EC = Idx.getError())
return EC;
// Check whether the name at this index has already been read; if not,
// read it directly from the MD5 name table in memory.
StringRef &SR = NameTable[*Idx];
if (SR.empty()) {
const uint8_t *SavedData = Data;
Data = MD5NameMemStart + ((*Idx) * sizeof(uint64_t));
auto FID = readUnencodedNumber<uint64_t>();
if (std::error_code EC = FID.getError())
return EC;
// Save the string converted from uint64_t in MD5StringBuf. All the
// references to the name are StringRefs referring to the string
// in MD5StringBuf.
MD5StringBuf->push_back(std::to_string(*FID));
SR = MD5StringBuf->back();
Data = SavedData;
}
return SR;
}
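/// Read an index into the compact binary name table and return the name.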
ErrorOr<StringRef> SampleProfileReaderCompactBinary::readStringFromTable() {
auto Idx = readStringIndex(NameTable);
if (std::error_code EC = Idx.getError())
return EC;
return StringRef(NameTable[*Idx]);
}
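/// Read the body samples and inlined callsite samples for \p FProfile,
/// recursing into each inlined callee's profile.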
std::error_code
SampleProfileReaderBinary::readProfile(FunctionSamples &FProfile) {
auto NumSamples = readNumber<uint64_t>();
if (std::error_code EC = NumSamples.getError())
return EC;
FProfile.addTotalSamples(*NumSamples);
// Read the samples in the body.
auto NumRecords = readNumber<uint32_t>();
if (std::error_code EC = NumRecords.getError())
return EC;
for (uint32_t I = 0; I < *NumRecords; ++I) {
auto LineOffset = readNumber<uint64_t>();
if (std::error_code EC = LineOffset.getError())
return EC;
if (!isOffsetLegal(*LineOffset)) {
return std::error_code();
}
auto Discriminator = readNumber<uint64_t>();
if (std::error_code EC = Discriminator.getError())
return EC;
auto NumSamples = readNumber<uint64_t>();
if (std::error_code EC = NumSamples.getError())
return EC;
auto NumCalls = readNumber<uint32_t>();
if (std::error_code EC = NumCalls.getError())
return EC;
// Here we handle FS discriminators:
uint32_t DiscriminatorVal = (*Discriminator) & getDiscriminatorMask();
for (uint32_t J = 0; J < *NumCalls; ++J) {
auto CalledFunction(readStringFromTable());
if (std::error_code EC = CalledFunction.getError())
return EC;
auto CalledFunctionSamples = readNumber<uint64_t>();
if (std::error_code EC = CalledFunctionSamples.getError())
return EC;
FProfile.addCalledTargetSamples(*LineOffset, DiscriminatorVal,
*CalledFunction, *CalledFunctionSamples);
}
FProfile.addBodySamples(*LineOffset, DiscriminatorVal, *NumSamples);
}
// Read all the samples for inlined function calls.
auto NumCallsites = readNumber<uint32_t>();
if (std::error_code EC = NumCallsites.getError())
return EC;
for (uint32_t J = 0; J < *NumCallsites; ++J) {
auto LineOffset = readNumber<uint64_t>();
if (std::error_code EC = LineOffset.getError())
return EC;
auto Discriminator = readNumber<uint64_t>();
if (std::error_code EC = Discriminator.getError())
return EC;
auto FName(readStringFromTable());
if (std::error_code EC = FName.getError())
return EC;
// Here we handle FS discriminators:
uint32_t DiscriminatorVal = (*Discriminator) & getDiscriminatorMask();
FunctionSamples &CalleeProfile = FProfile.functionSamplesAt(
LineLocation(*LineOffset, DiscriminatorVal))[std::string(*FName)];
CalleeProfile.setName(*FName);
if (std::error_code EC = readProfile(CalleeProfile))
return EC;
}
return sampleprof_error::success;
}
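/// Read a single function profile starting at \p Start. The head sample count
/// and the function context are read first, then a FunctionSamples entry is
/// created in Profiles and its body is filled in by readProfile.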
std::error_code
SampleProfileReaderBinary::readFuncProfile(const uint8_t *Start) {
Data = Start;
auto NumHeadSamples = readNumber<uint64_t>();
if (std::error_code EC = NumHeadSamples.getError())
return EC;
ErrorOr<SampleContext> FContext(readSampleContextFromTable());
if (std::error_code EC = FContext.getError())
return EC;
Profiles[*FContext] = FunctionSamples();
FunctionSamples &FProfile = Profiles[*FContext];
FProfile.setContext(*FContext);
FProfile.addHeadSamples(*NumHeadSamples);
if (FContext->hasContext())
CSProfileCount++;
if (std::error_code EC = readProfile(FProfile))
return EC;
return sampleprof_error::success;
}
std::error_code SampleProfileReaderBinary::readImpl() {
ProfileIsFS = ProfileIsFSDisciminator;
FunctionSamples::ProfileIsFS = ProfileIsFS;
while (!at_eof()) {
if (std::error_code EC = readFuncProfile(Data))
return EC;
}
return sampleprof_error::success;
}
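/// Read an index into the CS name table and return the pre-decoded context
/// frame vector stored at that slot.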
ErrorOr<SampleContextFrames>
SampleProfileReaderExtBinaryBase::readContextFromTable() {
auto ContextIdx = readNumber<uint32_t>();
if (std::error_code EC = ContextIdx.getError())
return EC;
if (*ContextIdx >= CSNameTable->size())
return sampleprof_error::truncated_name_table;
return (*CSNameTable)[*ContextIdx];
}
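/// Read a sample context. For CS-flat profiles the full context comes from the
/// CS name table; otherwise it is a plain function name from the name table.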
ErrorOr<SampleContext>
SampleProfileReaderExtBinaryBase::readSampleContextFromTable() {
if (ProfileIsCSFlat) {
auto FContext(readContextFromTable());
if (std::error_code EC = FContext.getError())
return EC;
return SampleContext(*FContext);
} else {
auto FName(readStringFromTable());
if (std::error_code EC = FName.getError())
return EC;
return SampleContext(*FName);
}
}
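/// Read one section of an extensible binary profile. The section type in
/// \p Entry decides how the byte range [Start, Start + Size) is interpreted;
/// unrecognized section types are forwarded to readCustomSection.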
std::error_code SampleProfileReaderExtBinaryBase::readOneSection(
const uint8_t *Start, uint64_t Size, const SecHdrTableEntry &Entry) {
Data = Start;
End = Start + Size;
switch (Entry.Type) {
case SecProfSummary:
if (std::error_code EC = readSummary())
return EC;
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagPartial))
Summary->setPartialProfile(true);
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagFullContext))
FunctionSamples::ProfileIsCSFlat = ProfileIsCSFlat = true;
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagIsCSNested))
FunctionSamples::ProfileIsCSNested = ProfileIsCSNested = true;
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagFSDiscriminator))
FunctionSamples::ProfileIsFS = ProfileIsFS = true;
break;
case SecNameTable: {
FixedLengthMD5 =
hasSecFlag(Entry, SecNameTableFlags::SecFlagFixedLengthMD5);
bool UseMD5 = hasSecFlag(Entry, SecNameTableFlags::SecFlagMD5Name);
assert((!FixedLengthMD5 || UseMD5) &&
"If FixedLengthMD5 is true, UseMD5 has to be true");
FunctionSamples::HasUniqSuffix =
hasSecFlag(Entry, SecNameTableFlags::SecFlagUniqSuffix);
if (std::error_code EC = readNameTableSec(UseMD5))
return EC;
break;
}
case SecCSNameTable: {
if (std::error_code EC = readCSNameTableSec())
return EC;
break;
}
case SecLBRProfile:
if (std::error_code EC = readFuncProfiles())
return EC;
break;
case SecFuncOffsetTable:
FuncOffsetsOrdered = hasSecFlag(Entry, SecFuncOffsetFlags::SecFlagOrdered);
if (std::error_code EC = readFuncOffsetTable())
return EC;
break;
case SecFuncMetadata: {
ProfileIsProbeBased =
hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagIsProbeBased);
FunctionSamples::ProfileIsProbeBased = ProfileIsProbeBased;
bool HasAttribute =
hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagHasAttribute);
if (std::error_code EC = readFuncMetadata(HasAttribute))
return EC;
break;
}
case SecProfileSymbolList:
if (std::error_code EC = readProfileSymbolList())
return EC;
break;
default:
if (std::error_code EC = readCustomSection(Entry))
return EC;
break;
}
return sampleprof_error::success;
}
bool SampleProfileReaderExtBinaryBase::collectFuncsFromModule() {
if (!M)
return false;
FuncsToUse.clear();
for (auto &F : *M)
FuncsToUse.insert(FunctionSamples::getCanonicalFnName(F));
return true;
}
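/// Read the function offset table, which maps each function context to the
/// offset of its profile within the LBRProfile section. When the section is
/// flagged as ordered, the entries are also recorded in their on-disk order
/// in OrderedFuncOffsets, which CS profile loading relies on.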
std::error_code SampleProfileReaderExtBinaryBase::readFuncOffsetTable() {
// If there is more than one FuncOffsetTable, the profile associated with
// the previous FuncOffsetTable has to be read before the next
// FuncOffsetTable is read.
FuncOffsetTable.clear();
auto Size = readNumber<uint64_t>();
if (std::error_code EC = Size.getError())
return EC;
FuncOffsetTable.reserve(*Size);
if (FuncOffsetsOrdered) {
OrderedFuncOffsets =
std::make_unique<std::vector<std::pair<SampleContext, uint64_t>>>();
OrderedFuncOffsets->reserve(*Size);
}
for (uint32_t I = 0; I < *Size; ++I) {
auto FContext(readSampleContextFromTable());
if (std::error_code EC = FContext.getError())
return EC;
auto Offset = readNumber<uint64_t>();
if (std::error_code EC = Offset.getError())
return EC;
FuncOffsetTable[*FContext] = *Offset;
if (FuncOffsetsOrdered)
OrderedFuncOffsets->emplace_back(*FContext, *Offset);
}
return sampleprof_error::success;
}
std::error_code SampleProfileReaderExtBinaryBase::readFuncProfiles() {
// Collect the functions used by the current module if the Reader has been
// given a module.
// collectFuncsFromModule uses FunctionSamples::getCanonicalFnName, which
// queries FunctionSamples::HasUniqSuffix, so it has to be called after
// FunctionSamples::HasUniqSuffix is set, i.e. after the NameTable section
// is read.
bool LoadFuncsToBeUsed = collectFuncsFromModule();
// When LoadFuncsToBeUsed is false, load all the function profiles.
const uint8_t *Start = Data;
if (!LoadFuncsToBeUsed) {
while (Data < End) {
if (std::error_code EC = readFuncProfile(Data))
return EC;
}
assert(Data == End && "More data is read than expected");
} else {
// Load function profiles on demand.
if (Remapper) {
for (auto Name : FuncsToUse) {
Remapper->insert(Name);
}
}
if (ProfileIsCSFlat) {
DenseSet<uint64_t> FuncGuidsToUse;
if (useMD5()) {
for (auto Name : FuncsToUse)
FuncGuidsToUse.insert(Function::getGUID(Name));
}
// For each function in the current module, load all context profiles for
// the function as well as their callee contexts, which can help profile
// guided importing for ThinLTO. This is achieved by walking through an
// ordered context container, where contexts are laid out as if they were
// walked in preorder of a context trie. While traversing the trie, a link
// to the highest common ancestor node is kept so that all of its
// descendants will be loaded.
assert(OrderedFuncOffsets.get() &&
"func offset table should always be sorted in CS profile");
const SampleContext *CommonContext = nullptr;
for (const auto &NameOffset : *OrderedFuncOffsets) {
const auto &FContext = NameOffset.first;
auto FName = FContext.getName();
// For a function in the current module, keep its farthest ancestor
// context. This can be used to load the function itself and its child
// and sibling contexts.
if ((useMD5() && FuncGuidsToUse.count(std::stoull(FName.data()))) ||
(!useMD5() && (FuncsToUse.count(FName) ||
(Remapper && Remapper->exist(FName))))) {
if (!CommonContext || !CommonContext->IsPrefixOf(FContext))
CommonContext = &FContext;
}
if (CommonContext == &FContext ||
(CommonContext && CommonContext->IsPrefixOf(FContext))) {
// Load profile for the current context which originated from
// the common ancestor.
const uint8_t *FuncProfileAddr = Start + NameOffset.second;
assert(FuncProfileAddr < End && "out of LBRProfile section");
if (std::error_code EC = readFuncProfile(FuncProfileAddr))
return EC;
}
}
} else {
if (useMD5()) {
for (auto Name : FuncsToUse) {
auto GUID = std::to_string(MD5Hash(Name));
auto iter = FuncOffsetTable.find(StringRef(GUID));
if (iter == FuncOffsetTable.end())
continue;
const uint8_t *FuncProfileAddr = Start + iter->second;
assert(FuncProfileAddr < End && "out of LBRProfile section");
if (std::error_code EC = readFuncProfile(FuncProfileAddr))
return EC;
}
} else {
for (auto NameOffset : FuncOffsetTable) {
SampleContext FContext(NameOffset.first);
auto FuncName = FContext.getName();
if (!FuncsToUse.count(FuncName) &&
(!Remapper || !Remapper->exist(FuncName)))
continue;
const uint8_t *FuncProfileAddr = Start + NameOffset.second;
assert(FuncProfileAddr < End && "out of LBRProfile section");
if (std::error_code EC = readFuncProfile(FuncProfileAddr))
return EC;
}
}
}
Data = End;
}
assert((CSProfileCount == 0 || CSProfileCount == Profiles.size()) &&
"Cannot have both context-sensitive and regular profile");
assert((!CSProfileCount || ProfileIsCSFlat) &&
"Section flag should be consistent with actual profile");
return sampleprof_error::success;
}
std::error_code SampleProfileReaderExtBinaryBase::readProfileSymbolList() {
if (!ProfSymList)
ProfSymList = std::make_unique<ProfileSymbolList>();
if (std::error_code EC = ProfSymList->read(Data, End - Data))
return EC;
Data = End;
return sampleprof_error::success;
}
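/// Decompress a zlib-compressed section. The section body starts with the
/// decompressed size and the compressed size, followed by the compressed
/// payload; the result is placed in an allocator-owned buffer returned
/// through \p DecompressBuf.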
std::error_code SampleProfileReaderExtBinaryBase::decompressSection(
const uint8_t *SecStart, const uint64_t SecSize,
const uint8_t *&DecompressBuf, uint64_t &DecompressBufSize) {
Data = SecStart;
End = SecStart + SecSize;
auto DecompressSize = readNumber<uint64_t>();
if (std::error_code EC = DecompressSize.getError())
return EC;
DecompressBufSize = *DecompressSize;
auto CompressSize = readNumber<uint64_t>();
if (std::error_code EC = CompressSize.getError())
return EC;
if (!llvm::zlib::isAvailable())
return sampleprof_error::zlib_unavailable;
StringRef CompressedStrings(reinterpret_cast<const char *>(Data),
*CompressSize);
char *Buffer = Allocator.Allocate<char>(DecompressBufSize);
size_t UCSize = DecompressBufSize;
llvm::Error E =
zlib::uncompress(CompressedStrings, Buffer, UCSize);
if (E)
return sampleprof_error::uncompress_failed;
DecompressBuf = reinterpret_cast<const uint8_t *>(Buffer);
return sampleprof_error::success;
}
std::error_code SampleProfileReaderExtBinaryBase::readImpl() {
const uint8_t *BufStart =
reinterpret_cast<const uint8_t *>(Buffer->getBufferStart());
for (auto &Entry : SecHdrTable) {
// Skip empty section.
if (!Entry.Size)
continue;
// Skip sections without context when SkipFlatProf is true.
if (SkipFlatProf && hasSecFlag(Entry, SecCommonFlags::SecFlagFlat))
continue;
const uint8_t *SecStart = BufStart + Entry.Offset;
uint64_t SecSize = Entry.Size;
// If the section is compressed, decompress it into a buffer
// DecompressBuf before reading the actual data. The pointee of
// 'Data' will temporarily be changed to the buffer held by
// DecompressBuf while reading the actual data.
bool isCompressed = hasSecFlag(Entry, SecCommonFlags::SecFlagCompress);
if (isCompressed) {
const uint8_t *DecompressBuf;
uint64_t DecompressBufSize;
if (std::error_code EC = decompressSection(
SecStart, SecSize, DecompressBuf, DecompressBufSize))
return EC;
SecStart = DecompressBuf;
SecSize = DecompressBufSize;
}
if (std::error_code EC = readOneSection(SecStart, SecSize, Entry))
return EC;
if (Data != SecStart + SecSize)
return sampleprof_error::malformed;
// Change the pointee of 'Data' from DecompressBuf to original Buffer.
if (isCompressed) {
Data = BufStart + Entry.Offset;
End = BufStart + Buffer->getBufferSize();
}
}
return sampleprof_error::success;
}
std::error_code SampleProfileReaderCompactBinary::readImpl() {
// Collect the functions used by the current module if the Reader has been
// given a module.
bool LoadFuncsToBeUsed = collectFuncsFromModule();
ProfileIsFS = ProfileIsFSDisciminator;
FunctionSamples::ProfileIsFS = ProfileIsFS;
std::vector<uint64_t> OffsetsToUse;
if (!LoadFuncsToBeUsed) {
// load all the function profiles.
for (auto FuncEntry : FuncOffsetTable) {
OffsetsToUse.push_back(FuncEntry.second);
}
} else {
// load function profiles on demand.
for (auto Name : FuncsToUse) {
auto GUID = std::to_string(MD5Hash(Name));
auto iter = FuncOffsetTable.find(StringRef(GUID));
if (iter == FuncOffsetTable.end())
continue;
OffsetsToUse.push_back(iter->second);
}
}
for (auto Offset : OffsetsToUse) {
const uint8_t *SavedData = Data;
if (std::error_code EC = readFuncProfile(
reinterpret_cast<const uint8_t *>(Buffer->getBufferStart()) +
Offset))
return EC;
Data = SavedData;
}
return sampleprof_error::success;
}
std::error_code SampleProfileReaderRawBinary::verifySPMagic(uint64_t Magic) {
if (Magic == SPMagic())
return sampleprof_error::success;
return sampleprof_error::bad_magic;
}
std::error_code SampleProfileReaderExtBinary::verifySPMagic(uint64_t Magic) {
if (Magic == SPMagic(SPF_Ext_Binary))
return sampleprof_error::success;
return sampleprof_error::bad_magic;
}
std::error_code
SampleProfileReaderCompactBinary::verifySPMagic(uint64_t Magic) {
if (Magic == SPMagic(SPF_Compact_Binary))
return sampleprof_error::success;
return sampleprof_error::bad_magic;
}
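/// Read the name table: an entry count followed by that many names, each of
/// which is appended to NameTable.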
std::error_code SampleProfileReaderBinary::readNameTable() {
auto Size = readNumber<uint32_t>();
if (std::error_code EC = Size.getError())
return EC;
NameTable.reserve(*Size + NameTable.size());
for (uint32_t I = 0; I < *Size; ++I) {
auto Name(readString());
if (std::error_code EC = Name.getError())
return EC;
NameTable.push_back(*Name);
}
return sampleprof_error::success;
}
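/// Read an MD5 name table. With fixed-length entries the raw uint64_t hashes
/// are left in place and decoded lazily by readStringFromTable; otherwise each
/// hash is read immediately and converted to its decimal string form in
/// MD5StringBuf.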
std::error_code SampleProfileReaderExtBinaryBase::readMD5NameTable() {
auto Size = readNumber<uint64_t>();
if (std::error_code EC = Size.getError())
return EC;
MD5StringBuf = std::make_unique<std::vector<std::string>>();
MD5StringBuf->reserve(*Size);
if (FixedLengthMD5) {
// Preallocate and initialize NameTable so we can check whether a name
// index has been read before by checking whether the element in
// NameTable is empty; meanwhile, readStringIndex can do bounds checking
// using the size of NameTable.
NameTable.resize(*Size + NameTable.size());
MD5NameMemStart = Data;
Data = Data + (*Size) * sizeof(uint64_t);
return sampleprof_error::success;
}
NameTable.reserve(*Size);
for (uint32_t I = 0; I < *Size; ++I) {
auto FID = readNumber<uint64_t>();
if (std::error_code EC = FID.getError())
return EC;
MD5StringBuf->push_back(std::to_string(*FID));
// NameTable is a vector of StringRef. Here we push back a StringRef
// initialized with the last string in MD5StringBuf.
NameTable.push_back(MD5StringBuf->back());
}
return sampleprof_error::success;
}
std::error_code SampleProfileReaderExtBinaryBase::readNameTableSec(bool IsMD5) {
if (IsMD5)
return readMD5NameTable();
return SampleProfileReaderBinary::readNameTable();
}
// Read in the CS name table section, which contains a list of context
// vectors. Each element of a context vector, aka a frame, refers to the
// underlying raw function name stored in the name table, as well as a
// callsite identifier that only makes sense for non-leaf frames.
std::error_code SampleProfileReaderExtBinaryBase::readCSNameTableSec() {
auto Size = readNumber<uint32_t>();
if (std::error_code EC = Size.getError())
return EC;
std::vector<SampleContextFrameVector> *PNameVec =
new std::vector<SampleContextFrameVector>();
PNameVec->reserve(*Size);
for (uint32_t I = 0; I < *Size; ++I) {
PNameVec->emplace_back(SampleContextFrameVector());
auto ContextSize = readNumber<uint32_t>();
if (std::error_code EC = ContextSize.getError())
return EC;
for (uint32_t J = 0; J < *ContextSize; ++J) {
auto FName(readStringFromTable());
if (std::error_code EC = FName.getError())
return EC;
auto LineOffset = readNumber<uint64_t>();
if (std::error_code EC = LineOffset.getError())
return EC;
if (!isOffsetLegal(*LineOffset))
return std::error_code();
auto Discriminator = readNumber<uint64_t>();
if (std::error_code EC = Discriminator.getError())
return EC;
PNameVec->back().emplace_back(
FName.get(), LineLocation(LineOffset.get(), Discriminator.get()));
}
}
// From this point the underlying object of CSNameTable should be immutable.
CSNameTable.reset(PNameVec);
return sampleprof_error::success;
}
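/// Read the metadata of one function profile: the CFG checksum when the
/// profile is probe based, the context attributes when present, and, for
/// non-CS-flat profiles, the metadata of inlined callsites, read recursively.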
std::error_code
SampleProfileReaderExtBinaryBase::readFuncMetadata(bool ProfileHasAttribute,
FunctionSamples *FProfile) {
if (Data < End) {
if (ProfileIsProbeBased) {
auto Checksum = readNumber<uint64_t>();
if (std::error_code EC = Checksum.getError())
return EC;
if (FProfile)
FProfile->setFunctionHash(*Checksum);
}
if (ProfileHasAttribute) {
auto Attributes = readNumber<uint32_t>();
if (std::error_code EC = Attributes.getError())
return EC;
if (FProfile)
FProfile->getContext().setAllAttributes(*Attributes);
}
if (!ProfileIsCSFlat) {
// Read all the attributes for inlined function calls.
auto NumCallsites = readNumber<uint32_t>();
if (std::error_code EC = NumCallsites.getError())
return EC;
for (uint32_t J = 0; J < *NumCallsites; ++J) {
auto LineOffset = readNumber<uint64_t>();
if (std::error_code EC = LineOffset.getError())
return EC;
auto Discriminator = readNumber<uint64_t>();
if (std::error_code EC = Discriminator.getError())
return EC;
auto FContext(readSampleContextFromTable());
if (std::error_code EC = FContext.getError())
return EC;
FunctionSamples *CalleeProfile = nullptr;
if (FProfile) {
CalleeProfile = const_cast<FunctionSamples *>(
&FProfile->functionSamplesAt(LineLocation(
*LineOffset,
*Discriminator))[std::string(FContext.get().getName())]);
}
if (std::error_code EC =
readFuncMetadata(ProfileHasAttribute, CalleeProfile))
return EC;
}
}
}
return sampleprof_error::success;
}
std::error_code
SampleProfileReaderExtBinaryBase::readFuncMetadata(bool ProfileHasAttribute) {
while (Data < End) {
auto FContext(readSampleContextFromTable());
if (std::error_code EC = FContext.getError())
return EC;
FunctionSamples *FProfile = nullptr;
auto It = Profiles.find(*FContext);
if (It != Profiles.end())
FProfile = &It->second;
if (std::error_code EC = readFuncMetadata(ProfileHasAttribute, FProfile))
return EC;
}
assert(Data == End && "More data is read than expected");
return sampleprof_error::success;
}
std::error_code SampleProfileReaderCompactBinary::readNameTable() {
auto Size = readNumber<uint64_t>();
if (std::error_code EC = Size.getError())
return EC;
NameTable.reserve(*Size);
for (uint32_t I = 0; I < *Size; ++I) {
auto FID = readNumber<uint64_t>();
if (std::error_code EC = FID.getError())
return EC;
NameTable.push_back(std::to_string(*FID));
}
return sampleprof_error::success;
}
std::error_code
SampleProfileReaderExtBinaryBase::readSecHdrTableEntry(uint32_t Idx) {
SecHdrTableEntry Entry;
auto Type = readUnencodedNumber<uint64_t>();
if (std::error_code EC = Type.getError())
return EC;
Entry.Type = static_cast<SecType>(*Type);
auto Flags = readUnencodedNumber<uint64_t>();
if (std::error_code EC = Flags.getError())
return EC;
Entry.Flags = *Flags;
auto Offset = readUnencodedNumber<uint64_t>();
if (std::error_code EC = Offset.getError())
return EC;
Entry.Offset = *Offset;
auto Size = readUnencodedNumber<uint64_t>();
if (std::error_code EC = Size.getError())
return EC;
Entry.Size = *Size;
Entry.LayoutIndex = Idx;
SecHdrTable.push_back(std::move(Entry));
return sampleprof_error::success;
}
std::error_code SampleProfileReaderExtBinaryBase::readSecHdrTable() {
auto EntryNum = readUnencodedNumber<uint64_t>();
if (std::error_code EC = EntryNum.getError())
return EC;
for (uint32_t i = 0; i < (*EntryNum); i++)
if (std::error_code EC = readSecHdrTableEntry(i))
return EC;
return sampleprof_error::success;
}
std::error_code SampleProfileReaderExtBinaryBase::readHeader() {
const uint8_t *BufStart =
reinterpret_cast<const uint8_t *>(Buffer->getBufferStart());
Data = BufStart;
End = BufStart + Buffer->getBufferSize();
if (std::error_code EC = readMagicIdent())
return EC;
if (std::error_code EC = readSecHdrTable())
return EC;
return sampleprof_error::success;
}
uint64_t SampleProfileReaderExtBinaryBase::getSectionSize(SecType Type) {
uint64_t Size = 0;
for (auto &Entry : SecHdrTable) {
if (Entry.Type == Type)
Size += Entry.Size;
}
return Size;
}
uint64_t SampleProfileReaderExtBinaryBase::getFileSize() {
  // Sections in SecHdrTable are not necessarily in the same order as
  // sections in the profile, because a section like FuncOffsetTable needs
  // to be written after section LBRProfile but read before it. Therefore we
  // cannot simply use the last entry in SecHdrTable to calculate the file
  // size.
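  // For illustration only (hypothetical values): with entries
  // {Offset = 8, Size = 100} and {Offset = 108, Size = 16}, the file size is
  // max(8 + 100, 108 + 16) = 124.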
uint64_t FileSize = 0;
for (auto &Entry : SecHdrTable) {
FileSize = std::max(Entry.Offset + Entry.Size, FileSize);
}
return FileSize;
}
static std::string getSecFlagsStr(const SecHdrTableEntry &Entry) {
std::string Flags;
if (hasSecFlag(Entry, SecCommonFlags::SecFlagCompress))
Flags.append("{compressed,");
else
Flags.append("{");
if (hasSecFlag(Entry, SecCommonFlags::SecFlagFlat))
Flags.append("flat,");
switch (Entry.Type) {
case SecNameTable:
if (hasSecFlag(Entry, SecNameTableFlags::SecFlagFixedLengthMD5))
Flags.append("fixlenmd5,");
else if (hasSecFlag(Entry, SecNameTableFlags::SecFlagMD5Name))
Flags.append("md5,");
if (hasSecFlag(Entry, SecNameTableFlags::SecFlagUniqSuffix))
Flags.append("uniq,");
break;
case SecProfSummary:
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagPartial))
Flags.append("partial,");
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagFullContext))
Flags.append("context,");
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagIsCSNested))
Flags.append("context-nested,");
if (hasSecFlag(Entry, SecProfSummaryFlags::SecFlagFSDiscriminator))
Flags.append("fs-discriminator,");
break;
case SecFuncOffsetTable:
if (hasSecFlag(Entry, SecFuncOffsetFlags::SecFlagOrdered))
Flags.append("ordered,");
break;
case SecFuncMetadata:
if (hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagIsProbeBased))
Flags.append("probe,");
if (hasSecFlag(Entry, SecFuncMetadataFlags::SecFlagHasAttribute))
Flags.append("attr,");
break;
default:
break;
}
char &last = Flags.back();
if (last == ',')
last = '}';
else
Flags.append("}");
return Flags;
}
bool SampleProfileReaderExtBinaryBase::dumpSectionInfo(raw_ostream &OS) {
uint64_t TotalSecsSize = 0;
for (auto &Entry : SecHdrTable) {
OS << getSecName(Entry.Type) << " - Offset: " << Entry.Offset
<< ", Size: " << Entry.Size << ", Flags: " << getSecFlagsStr(Entry)
<< "\n";
TotalSecsSize += Entry.Size;
}
uint64_t HeaderSize = SecHdrTable.front().Offset;
assert(HeaderSize + TotalSecsSize == getFileSize() &&
"Size of 'header + sections' doesn't match the total size of profile");
OS << "Header Size: " << HeaderSize << "\n";
OS << "Total Sections Size: " << TotalSecsSize << "\n";
OS << "File Size: " << getFileSize() << "\n";
return true;
}
std::error_code SampleProfileReaderBinary::readMagicIdent() {
// Read and check the magic identifier.
auto Magic = readNumber<uint64_t>();
if (std::error_code EC = Magic.getError())
return EC;
else if (std::error_code EC = verifySPMagic(*Magic))
return EC;
// Read the version number.
auto Version = readNumber<uint64_t>();
if (std::error_code EC = Version.getError())
return EC;
else if (*Version != SPVersion())
return sampleprof_error::unsupported_version;
return sampleprof_error::success;
}
std::error_code SampleProfileReaderBinary::readHeader() {
Data = reinterpret_cast<const uint8_t *>(Buffer->getBufferStart());
End = Data + Buffer->getBufferSize();
if (std::error_code EC = readMagicIdent())
return EC;
if (std::error_code EC = readSummary())
return EC;
if (std::error_code EC = readNameTable())
return EC;
return sampleprof_error::success;
}
std::error_code SampleProfileReaderCompactBinary::readHeader() {
  if (std::error_code EC = SampleProfileReaderBinary::readHeader())
    return EC;
if (std::error_code EC = readFuncOffsetTable())
return EC;
return sampleprof_error::success;
}
std::error_code SampleProfileReaderCompactBinary::readFuncOffsetTable() {
auto TableOffset = readUnencodedNumber<uint64_t>();
if (std::error_code EC = TableOffset.getError())
return EC;
const uint8_t *SavedData = Data;
const uint8_t *TableStart =
reinterpret_cast<const uint8_t *>(Buffer->getBufferStart()) +
*TableOffset;
Data = TableStart;
auto Size = readNumber<uint64_t>();
if (std::error_code EC = Size.getError())
return EC;
FuncOffsetTable.reserve(*Size);
for (uint32_t I = 0; I < *Size; ++I) {
auto FName(readStringFromTable());
if (std::error_code EC = FName.getError())
return EC;
auto Offset = readNumber<uint64_t>();
if (std::error_code EC = Offset.getError())
return EC;
FuncOffsetTable[*FName] = *Offset;
}
End = TableStart;
Data = SavedData;
return sampleprof_error::success;
}
bool SampleProfileReaderCompactBinary::collectFuncsFromModule() {
if (!M)
return false;
FuncsToUse.clear();
for (auto &F : *M)
FuncsToUse.insert(FunctionSamples::getCanonicalFnName(F));
return true;
}
std::error_code SampleProfileReaderBinary::readSummaryEntry(
std::vector<ProfileSummaryEntry> &Entries) {
auto Cutoff = readNumber<uint64_t>();
if (std::error_code EC = Cutoff.getError())
return EC;
auto MinBlockCount = readNumber<uint64_t>();
if (std::error_code EC = MinBlockCount.getError())
return EC;
auto NumBlocks = readNumber<uint64_t>();
if (std::error_code EC = NumBlocks.getError())
return EC;
Entries.emplace_back(*Cutoff, *MinBlockCount, *NumBlocks);
return sampleprof_error::success;
}
std::error_code SampleProfileReaderBinary::readSummary() {
auto TotalCount = readNumber<uint64_t>();
if (std::error_code EC = TotalCount.getError())
return EC;
auto MaxBlockCount = readNumber<uint64_t>();
if (std::error_code EC = MaxBlockCount.getError())
return EC;
auto MaxFunctionCount = readNumber<uint64_t>();
if (std::error_code EC = MaxFunctionCount.getError())
return EC;
auto NumBlocks = readNumber<uint64_t>();
if (std::error_code EC = NumBlocks.getError())
return EC;
auto NumFunctions = readNumber<uint64_t>();
if (std::error_code EC = NumFunctions.getError())
return EC;
auto NumSummaryEntries = readNumber<uint64_t>();
if (std::error_code EC = NumSummaryEntries.getError())
return EC;
std::vector<ProfileSummaryEntry> Entries;
for (unsigned i = 0; i < *NumSummaryEntries; i++) {
std::error_code EC = readSummaryEntry(Entries);
if (EC != sampleprof_error::success)
return EC;
}
Summary = std::make_unique<ProfileSummary>(
ProfileSummary::PSK_Sample, Entries, *TotalCount, *MaxBlockCount, 0,
*MaxFunctionCount, *NumBlocks, *NumFunctions);
return sampleprof_error::success;
}
bool SampleProfileReaderRawBinary::hasFormat(const MemoryBuffer &Buffer) {
const uint8_t *Data =
reinterpret_cast<const uint8_t *>(Buffer.getBufferStart());
uint64_t Magic = decodeULEB128(Data);
return Magic == SPMagic();
}
bool SampleProfileReaderExtBinary::hasFormat(const MemoryBuffer &Buffer) {
const uint8_t *Data =
reinterpret_cast<const uint8_t *>(Buffer.getBufferStart());
uint64_t Magic = decodeULEB128(Data);
return Magic == SPMagic(SPF_Ext_Binary);
}
bool SampleProfileReaderCompactBinary::hasFormat(const MemoryBuffer &Buffer) {
const uint8_t *Data =
reinterpret_cast<const uint8_t *>(Buffer.getBufferStart());
uint64_t Magic = decodeULEB128(Data);
return Magic == SPMagic(SPF_Compact_Binary);
}
std::error_code SampleProfileReaderGCC::skipNextWord() {
uint32_t dummy;
if (!GcovBuffer.readInt(dummy))
return sampleprof_error::truncated;
return sampleprof_error::success;
}
template <typename T> ErrorOr<T> SampleProfileReaderGCC::readNumber() {
if (sizeof(T) <= sizeof(uint32_t)) {
uint32_t Val;
if (GcovBuffer.readInt(Val) && Val <= std::numeric_limits<T>::max())
return static_cast<T>(Val);
} else if (sizeof(T) <= sizeof(uint64_t)) {
uint64_t Val;
if (GcovBuffer.readInt64(Val) && Val <= std::numeric_limits<T>::max())
return static_cast<T>(Val);
}
std::error_code EC = sampleprof_error::malformed;
reportError(0, EC.message());
return EC;
}
ErrorOr<StringRef> SampleProfileReaderGCC::readString() {
StringRef Str;
if (!GcovBuffer.readString(Str))
return sampleprof_error::truncated;
return Str;
}
std::error_code SampleProfileReaderGCC::readHeader() {
// Read the magic identifier.
if (!GcovBuffer.readGCDAFormat())
return sampleprof_error::unrecognized_format;
// Read the version number. Note - the GCC reader does not validate this
// version, but the profile creator generates v704.
GCOV::GCOVVersion version;
if (!GcovBuffer.readGCOVVersion(version))
return sampleprof_error::unrecognized_format;
if (version != GCOV::V407)
return sampleprof_error::unsupported_version;
// Skip the empty integer.
if (std::error_code EC = skipNextWord())
return EC;
return sampleprof_error::success;
}
std::error_code SampleProfileReaderGCC::readSectionTag(uint32_t Expected) {
uint32_t Tag;
if (!GcovBuffer.readInt(Tag))
return sampleprof_error::truncated;
if (Tag != Expected)
return sampleprof_error::malformed;
if (std::error_code EC = skipNextWord())
return EC;
return sampleprof_error::success;
}
std::error_code SampleProfileReaderGCC::readNameTable() {
if (std::error_code EC = readSectionTag(GCOVTagAFDOFileNames))
return EC;
uint32_t Size;
if (!GcovBuffer.readInt(Size))
return sampleprof_error::truncated;
for (uint32_t I = 0; I < Size; ++I) {
StringRef Str;
if (!GcovBuffer.readString(Str))
return sampleprof_error::truncated;
Names.push_back(std::string(Str));
}
return sampleprof_error::success;
}
std::error_code SampleProfileReaderGCC::readFunctionProfiles() {
if (std::error_code EC = readSectionTag(GCOVTagAFDOFunction))
return EC;
uint32_t NumFunctions;
if (!GcovBuffer.readInt(NumFunctions))
return sampleprof_error::truncated;
InlineCallStack Stack;
for (uint32_t I = 0; I < NumFunctions; ++I)
if (std::error_code EC = readOneFunctionProfile(Stack, true, 0))
return EC;
computeSummary();
return sampleprof_error::success;
}
std::error_code SampleProfileReaderGCC::readOneFunctionProfile(
const InlineCallStack &InlineStack, bool Update, uint32_t Offset) {
uint64_t HeadCount = 0;
if (InlineStack.size() == 0)
if (!GcovBuffer.readInt64(HeadCount))
return sampleprof_error::truncated;
uint32_t NameIdx;
if (!GcovBuffer.readInt(NameIdx))
return sampleprof_error::truncated;
StringRef Name(Names[NameIdx]);
uint32_t NumPosCounts;
if (!GcovBuffer.readInt(NumPosCounts))
return sampleprof_error::truncated;
uint32_t NumCallsites;
if (!GcovBuffer.readInt(NumCallsites))
return sampleprof_error::truncated;
FunctionSamples *FProfile = nullptr;
if (InlineStack.size() == 0) {
// If this is a top function that we have already processed, do not
// update its profile again. This happens in the presence of
// function aliases. Since these aliases share the same function
// body, there will be identical replicated profiles for the
    // original function. In this case, we simply do not bother updating
    // the profile of the original function.
FProfile = &Profiles[Name];
FProfile->addHeadSamples(HeadCount);
if (FProfile->getTotalSamples() > 0)
Update = false;
} else {
// Otherwise, we are reading an inlined instance. The top of the
// inline stack contains the profile of the caller. Insert this
// callee in the caller's CallsiteMap.
FunctionSamples *CallerProfile = InlineStack.front();
uint32_t LineOffset = Offset >> 16;
uint32_t Discriminator = Offset & 0xffff;
FProfile = &CallerProfile->functionSamplesAt(
LineLocation(LineOffset, Discriminator))[std::string(Name)];
}
FProfile->setName(Name);
for (uint32_t I = 0; I < NumPosCounts; ++I) {
uint32_t Offset;
if (!GcovBuffer.readInt(Offset))
return sampleprof_error::truncated;
uint32_t NumTargets;
if (!GcovBuffer.readInt(NumTargets))
return sampleprof_error::truncated;
uint64_t Count;
if (!GcovBuffer.readInt64(Count))
return sampleprof_error::truncated;
// The line location is encoded in the offset as:
// high 16 bits: line offset to the start of the function.
// low 16 bits: discriminator.
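    // For example (illustrative value), an encoded Offset of 0x00030002
    // decodes to LineOffset 3 and Discriminator 2.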
uint32_t LineOffset = Offset >> 16;
uint32_t Discriminator = Offset & 0xffff;
InlineCallStack NewStack;
NewStack.push_back(FProfile);
llvm::append_range(NewStack, InlineStack);
if (Update) {
// Walk up the inline stack, adding the samples on this line to
// the total sample count of the callers in the chain.
for (auto CallerProfile : NewStack)
CallerProfile->addTotalSamples(Count);
// Update the body samples for the current profile.
FProfile->addBodySamples(LineOffset, Discriminator, Count);
}
// Process the list of functions called at an indirect call site.
// These are all the targets that a function pointer (or virtual
// function) resolved at runtime.
for (uint32_t J = 0; J < NumTargets; J++) {
uint32_t HistVal;
if (!GcovBuffer.readInt(HistVal))
return sampleprof_error::truncated;
if (HistVal != HIST_TYPE_INDIR_CALL_TOPN)
return sampleprof_error::malformed;
uint64_t TargetIdx;
if (!GcovBuffer.readInt64(TargetIdx))
return sampleprof_error::truncated;
StringRef TargetName(Names[TargetIdx]);
uint64_t TargetCount;
if (!GcovBuffer.readInt64(TargetCount))
return sampleprof_error::truncated;
if (Update)
FProfile->addCalledTargetSamples(LineOffset, Discriminator,
TargetName, TargetCount);
}
}
  // Process all the callees inlined into the current function. These
  // are all the callsites that were inlined into this function.
for (uint32_t I = 0; I < NumCallsites; I++) {
// The offset is encoded as:
// high 16 bits: line offset to the start of the function.
// low 16 bits: discriminator.
uint32_t Offset;
if (!GcovBuffer.readInt(Offset))
return sampleprof_error::truncated;
InlineCallStack NewStack;
NewStack.push_back(FProfile);
llvm::append_range(NewStack, InlineStack);
if (std::error_code EC = readOneFunctionProfile(NewStack, Update, Offset))
return EC;
}
return sampleprof_error::success;
}
/// Read a GCC AutoFDO profile.
///
/// This format is generated by the Linux Perf conversion tool at
/// https://github.com/google/autofdo.
std::error_code SampleProfileReaderGCC::readImpl() {
  assert(!ProfileIsFSDisciminator &&
         "GCC profiles do not support FSDiscriminator");
// Read the string table.
if (std::error_code EC = readNameTable())
return EC;
// Read the source profile.
if (std::error_code EC = readFunctionProfiles())
return EC;
return sampleprof_error::success;
}
bool SampleProfileReaderGCC::hasFormat(const MemoryBuffer &Buffer) {
StringRef Magic(reinterpret_cast<const char *>(Buffer.getBufferStart()));
return Magic == "adcg*704";
}
void SampleProfileReaderItaniumRemapper::applyRemapping(LLVMContext &Ctx) {
  // If the reader uses MD5 to represent strings, we can't remap them because
  // we don't know what the original function names were.
if (Reader.useMD5()) {
Ctx.diagnose(DiagnosticInfoSampleProfile(
Reader.getBuffer()->getBufferIdentifier(),
"Profile data remapping cannot be applied to profile data "
"in compact format (original mangled names are not available).",
DS_Warning));
return;
}
// CSSPGO-TODO: Remapper is not yet supported.
// We will need to remap the entire context string.
assert(Remappings && "should be initialized while creating remapper");
for (auto &Sample : Reader.getProfiles()) {
DenseSet<StringRef> NamesInSample;
Sample.second.findAllNames(NamesInSample);
for (auto &Name : NamesInSample)
if (auto Key = Remappings->insert(Name))
NameMap.insert({Key, Name});
}
RemappingApplied = true;
}
Optional<StringRef>
SampleProfileReaderItaniumRemapper::lookUpNameInProfile(StringRef Fname) {
if (auto Key = Remappings->lookup(Fname))
return NameMap.lookup(Key);
return None;
}
/// Prepare a memory buffer for the contents of \p Filename.
///
/// \returns an error code indicating the status of the buffer.
static ErrorOr<std::unique_ptr<MemoryBuffer>>
setupMemoryBuffer(const Twine &Filename) {
auto BufferOrErr = MemoryBuffer::getFileOrSTDIN(Filename, /*IsText=*/true);
if (std::error_code EC = BufferOrErr.getError())
return EC;
auto Buffer = std::move(BufferOrErr.get());
// Check the file.
if (uint64_t(Buffer->getBufferSize()) > std::numeric_limits<uint32_t>::max())
return sampleprof_error::too_large;
return std::move(Buffer);
}
/// Create a sample profile reader based on the format of the input file.
///
/// \param Filename The file to open.
///
/// \param C The LLVM context to use to emit diagnostics.
///
/// \param P The FSDiscriminatorPass.
///
/// \param RemapFilename The file used for profile remapping.
///
/// \returns an error code indicating the status of the created reader.
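///
/// A minimal usage sketch (illustrative, not part of the original
/// documentation); the path "sample.prof" is a placeholder and
/// FSDiscriminatorPass::Base is assumed as the base-pass setting:
/// \code
///   LLVMContext Ctx;
///   auto ReaderOrErr = SampleProfileReader::create(
///       "sample.prof", Ctx, FSDiscriminatorPass::Base, "");
///   if (std::error_code EC = ReaderOrErr.getError())
///     errs() << "cannot load profile: " << EC.message() << "\n";
/// \endcode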
ErrorOr<std::unique_ptr<SampleProfileReader>>
SampleProfileReader::create(const std::string Filename, LLVMContext &C,
FSDiscriminatorPass P,
const std::string RemapFilename) {
auto BufferOrError = setupMemoryBuffer(Filename);
if (std::error_code EC = BufferOrError.getError())
return EC;
return create(BufferOrError.get(), C, P, RemapFilename);
}
/// Create a sample profile remapper from the given input, to remap the
/// function names in the given profile data.
///
/// \param Filename The file to open.
///
/// \param Reader The profile reader the remapper is going to be applied to.
///
/// \param C The LLVM context to use to emit diagnostics.
///
/// \returns an error code indicating the status of the created reader.
ErrorOr<std::unique_ptr<SampleProfileReaderItaniumRemapper>>
SampleProfileReaderItaniumRemapper::create(const std::string Filename,
SampleProfileReader &Reader,
LLVMContext &C) {
auto BufferOrError = setupMemoryBuffer(Filename);
if (std::error_code EC = BufferOrError.getError())
return EC;
return create(BufferOrError.get(), Reader, C);
}
/// Create a sample profile remapper from the given input, to remap the
/// function names in the given profile data.
///
/// \param B The memory buffer to create the reader from (assumes ownership).
///
/// \param C The LLVM context to use to emit diagnostics.
///
/// \param Reader The profile reader the remapper is going to be applied to.
///
/// \returns an error code indicating the status of the created reader.
ErrorOr<std::unique_ptr<SampleProfileReaderItaniumRemapper>>
SampleProfileReaderItaniumRemapper::create(std::unique_ptr<MemoryBuffer> &B,
SampleProfileReader &Reader,
LLVMContext &C) {
auto Remappings = std::make_unique<SymbolRemappingReader>();
if (Error E = Remappings->read(*B)) {
handleAllErrors(
std::move(E), [&](const SymbolRemappingParseError &ParseError) {
C.diagnose(DiagnosticInfoSampleProfile(B->getBufferIdentifier(),
ParseError.getLineNum(),
ParseError.getMessage()));
});
return sampleprof_error::malformed;
}
return std::make_unique<SampleProfileReaderItaniumRemapper>(
std::move(B), std::move(Remappings), Reader);
}
/// Create a sample profile reader based on the format of the input data.
///
/// \param B The memory buffer to create the reader from (assumes ownership).
///
/// \param C The LLVM context to use to emit diagnostics.
///
/// \param P The FSDiscriminatorPass.
///
/// \param RemapFilename The file used for profile remapping.
///
/// \returns an error code indicating the status of the created reader.
ErrorOr<std::unique_ptr<SampleProfileReader>>
SampleProfileReader::create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C,
FSDiscriminatorPass P,
const std::string RemapFilename) {
std::unique_ptr<SampleProfileReader> Reader;
if (SampleProfileReaderRawBinary::hasFormat(*B))
Reader.reset(new SampleProfileReaderRawBinary(std::move(B), C));
else if (SampleProfileReaderExtBinary::hasFormat(*B))
Reader.reset(new SampleProfileReaderExtBinary(std::move(B), C));
else if (SampleProfileReaderCompactBinary::hasFormat(*B))
Reader.reset(new SampleProfileReaderCompactBinary(std::move(B), C));
else if (SampleProfileReaderGCC::hasFormat(*B))
Reader.reset(new SampleProfileReaderGCC(std::move(B), C));
else if (SampleProfileReaderText::hasFormat(*B))
Reader.reset(new SampleProfileReaderText(std::move(B), C));
else
return sampleprof_error::unrecognized_format;
if (!RemapFilename.empty()) {
auto ReaderOrErr =
SampleProfileReaderItaniumRemapper::create(RemapFilename, *Reader, C);
if (std::error_code EC = ReaderOrErr.getError()) {
std::string Msg = "Could not create remapper: " + EC.message();
C.diagnose(DiagnosticInfoSampleProfile(RemapFilename, Msg));
return EC;
}
Reader->Remapper = std::move(ReaderOrErr.get());
}
if (std::error_code EC = Reader->readHeader()) {
return EC;
}
Reader->setDiscriminatorMaskedBitFrom(P);
return std::move(Reader);
}
// For text and GCC file formats, we compute the summary after reading the
// profile. Binary format has the profile summary in its header.
void SampleProfileReader::computeSummary() {
SampleProfileSummaryBuilder Builder(ProfileSummaryBuilder::DefaultCutoffs);
Summary = Builder.computeSummaryForProfiles(Profiles);
}