2016-02-10 06:50:34 +08:00
|
|
|
//===- WholeProgramDevirt.cpp - Whole program virtual call optimization ---===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2016-02-10 06:50:34 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass implements whole program optimization of virtual calls in cases
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
// where we know (via !type metadata) that the list of callees is fixed. This
|
2016-02-10 06:50:34 +08:00
|
|
|
// includes the following:
|
|
|
|
// - Single implementation devirtualization: if a virtual call has a single
|
|
|
|
// possible callee, replace all calls with a direct call to that callee.
|
|
|
|
// - Virtual constant propagation: if the virtual function's return type is an
|
|
|
|
// integer <=64 bits and all possible callees are readnone, for each class and
|
|
|
|
// each list of constant arguments: evaluate the function, store the return
|
|
|
|
// value alongside the virtual table, and rewrite each virtual call as a load
|
|
|
|
// from the virtual table.
|
|
|
|
// - Uniform return value optimization: if the conditions for virtual constant
|
|
|
|
// propagation hold and each function returns the same constant value, replace
|
|
|
|
// each virtual call with that constant.
|
|
|
|
// - Unique return value optimization for i1 return values: if the conditions
|
|
|
|
// for virtual constant propagation hold and a single vtable's function
|
|
|
|
// returns 0, or a single vtable's function returns 1, replace each virtual
|
|
|
|
// call with a comparison of the vptr against that vtable's address.
|
|
|
|
//
|
2019-08-02 21:10:52 +08:00
|
|
|
// This pass is intended to be used during the regular and thin LTO pipelines:
|
|
|
|
//
|
2017-03-04 09:23:30 +08:00
|
|
|
// During regular LTO, the pass determines the best optimization for each
|
|
|
|
// virtual call and applies the resolutions directly to virtual calls that are
|
|
|
|
// eligible for virtual call optimization (i.e. calls that use either of the
|
2019-08-02 21:10:52 +08:00
|
|
|
// llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics).
|
|
|
|
//
|
|
|
|
// During hybrid Regular/ThinLTO, the pass operates in two phases:
|
2017-03-04 09:23:30 +08:00
|
|
|
// - Export phase: this is run during the thin link over a single merged module
|
|
|
|
// that contains all vtables with !type metadata that participate in the link.
|
|
|
|
// The pass computes a resolution for each virtual call and stores it in the
|
|
|
|
// type identifier summary.
|
|
|
|
// - Import phase: this is run during the thin backends over the individual
|
|
|
|
// modules. The pass applies the resolutions previously computed during the
|
|
|
|
// import phase to each eligible virtual call.
|
|
|
|
//
|
2019-08-02 21:10:52 +08:00
|
|
|
// During ThinLTO, the pass operates in two phases:
|
|
|
|
// - Export phase: this is run during the thin link over the index which
|
|
|
|
// contains a summary of all vtables with !type metadata that participate in
|
|
|
|
// the link. It computes a resolution for each virtual call and stores it in
|
|
|
|
// the type identifier summary. Only single implementation devirtualization
|
|
|
|
// is supported.
|
|
|
|
// - Import phase: (same as with hybrid case above).
|
|
|
|
//
|
2016-02-10 06:50:34 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
|
2016-04-18 17:17:29 +08:00
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
|
|
|
#include "llvm/ADT/DenseMapInfo.h"
|
2016-02-10 06:50:34 +08:00
|
|
|
#include "llvm/ADT/DenseSet.h"
|
|
|
|
#include "llvm/ADT/MapVector.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "llvm/ADT/iterator_range.h"
|
2017-02-18 02:17:04 +08:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
|
|
|
#include "llvm/Analysis/BasicAliasAnalysis.h"
|
2017-10-10 07:19:02 +08:00
|
|
|
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
#include "llvm/Analysis/TypeMetadataUtils.h"
|
2016-02-10 06:50:34 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/DataLayout.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include "llvm/IR/DebugLoc.h"
|
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
2018-09-27 22:55:32 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/GlobalAlias.h"
|
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
2016-02-10 06:50:34 +08:00
|
|
|
#include "llvm/IR/IRBuilder.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include "llvm/IR/InstrTypes.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
2016-02-10 06:50:34 +08:00
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include "llvm/IR/LLVMContext.h"
|
|
|
|
#include "llvm/IR/Metadata.h"
|
2016-02-10 06:50:34 +08:00
|
|
|
#include "llvm/IR/Module.h"
|
2017-02-14 03:26:18 +08:00
|
|
|
#include "llvm/IR/ModuleSummaryIndexYAML.h"
|
2016-02-10 06:50:34 +08:00
|
|
|
#include "llvm/Pass.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include "llvm/PassRegistry.h"
|
|
|
|
#include "llvm/PassSupport.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2017-02-14 03:26:18 +08:00
|
|
|
#include "llvm/Support/Error.h"
|
|
|
|
#include "llvm/Support/FileSystem.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2016-04-18 17:17:29 +08:00
|
|
|
#include "llvm/Transforms/IPO.h"
|
2017-02-18 02:17:04 +08:00
|
|
|
#include "llvm/Transforms/IPO/FunctionAttrs.h"
|
2016-02-10 06:50:34 +08:00
|
|
|
#include "llvm/Transforms/Utils/Evaluator.h"
|
2016-08-12 01:20:18 +08:00
|
|
|
#include <algorithm>
|
|
|
|
#include <cstddef>
|
|
|
|
#include <map>
|
2016-02-10 06:50:34 +08:00
|
|
|
#include <set>
|
2016-08-12 01:20:18 +08:00
|
|
|
#include <string>
|
2016-02-10 06:50:34 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
using namespace wholeprogramdevirt;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "wholeprogramdevirt"
|
|
|
|
|
2017-02-14 03:26:18 +08:00
|
|
|
// Command-line options controlling this pass.

// What to do with a summary if one is supplied: nothing, import typeid
// resolutions from it, or export typeid resolutions into it.
static cl::opt<PassSummaryAction> ClSummaryAction(
    "wholeprogramdevirt-summary-action",
    cl::desc("What to do with the summary when running this pass"),
    cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
               clEnumValN(PassSummaryAction::Import, "import",
                          "Import typeid resolutions from summary and globals"),
               clEnumValN(PassSummaryAction::Export, "export",
                          "Export typeid resolutions to summary and globals")),
    cl::Hidden);

// Path of a YAML summary file to read before the pass runs.
static cl::opt<std::string> ClReadSummary(
    "wholeprogramdevirt-read-summary",
    cl::desc("Read summary from given YAML file before running pass"),
    cl::Hidden);

// Path of a YAML summary file to write after the pass runs.
static cl::opt<std::string> ClWriteSummary(
    "wholeprogramdevirt-write-summary",
    cl::desc("Write summary to given YAML file after running pass"),
    cl::Hidden);

// Upper bound on the number of call targets for which a branch funnel is
// still considered profitable at a single call site.
static cl::opt<unsigned>
    ClThreshold("wholeprogramdevirt-branch-funnel-threshold", cl::Hidden,
                cl::init(10), cl::ZeroOrMore,
                cl::desc("Maximum number of call targets per "
                         "call site to enable branch funnels"));

// Debugging aid for the index-based (ThinLTO) code path.
static cl::opt<bool>
    PrintSummaryDevirt("wholeprogramdevirt-print-index-based", cl::Hidden,
                       cl::init(false), cl::ZeroOrMore,
                       cl::desc("Print index-based devirtualization messages"));
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
// Find the minimum offset that we may store a value of size Size bits at. If
|
|
|
|
// IsAfter is set, look for an offset before the object, otherwise look for an
|
|
|
|
// offset after the object.
|
|
|
|
uint64_t
|
|
|
|
wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
|
|
|
|
bool IsAfter, uint64_t Size) {
|
|
|
|
// Find a minimum offset taking into account only vtable sizes.
|
|
|
|
uint64_t MinByte = 0;
|
|
|
|
for (const VirtualCallTarget &Target : Targets) {
|
|
|
|
if (IsAfter)
|
|
|
|
MinByte = std::max(MinByte, Target.minAfterBytes());
|
|
|
|
else
|
|
|
|
MinByte = std::max(MinByte, Target.minBeforeBytes());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build a vector of arrays of bytes covering, for each target, a slice of the
|
|
|
|
// used region (see AccumBitVector::BytesUsed in
|
|
|
|
// llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte. Effectively,
|
|
|
|
// this aligns the used regions to start at MinByte.
|
|
|
|
//
|
|
|
|
// In this example, A, B and C are vtables, # is a byte already allocated for
|
|
|
|
// a virtual function pointer, AAAA... (etc.) are the used regions for the
|
|
|
|
// vtables and Offset(X) is the value computed for the Offset variable below
|
|
|
|
// for X.
|
|
|
|
//
|
|
|
|
// Offset(A)
|
|
|
|
// | |
|
|
|
|
// |MinByte
|
|
|
|
// A: ################AAAAAAAA|AAAAAAAA
|
|
|
|
// B: ########BBBBBBBBBBBBBBBB|BBBB
|
|
|
|
// C: ########################|CCCCCCCCCCCCCCCC
|
|
|
|
// | Offset(B) |
|
|
|
|
//
|
|
|
|
// This code produces the slices of A, B and C that appear after the divider
|
|
|
|
// at MinByte.
|
|
|
|
std::vector<ArrayRef<uint8_t>> Used;
|
|
|
|
for (const VirtualCallTarget &Target : Targets) {
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed
|
|
|
|
: Target.TM->Bits->Before.BytesUsed;
|
2016-02-10 06:50:34 +08:00
|
|
|
uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes()
|
|
|
|
: MinByte - Target.minBeforeBytes();
|
|
|
|
|
|
|
|
// Disregard used regions that are smaller than Offset. These are
|
|
|
|
// effectively all-free regions that do not need to be checked.
|
|
|
|
if (VTUsed.size() > Offset)
|
|
|
|
Used.push_back(VTUsed.slice(Offset));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Size == 1) {
|
|
|
|
// Find a free bit in each member of Used.
|
|
|
|
for (unsigned I = 0;; ++I) {
|
|
|
|
uint8_t BitsUsed = 0;
|
|
|
|
for (auto &&B : Used)
|
|
|
|
if (I < B.size())
|
|
|
|
BitsUsed |= B[I];
|
|
|
|
if (BitsUsed != 0xff)
|
|
|
|
return (MinByte + I) * 8 +
|
|
|
|
countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Find a free (Size/8) byte region in each member of Used.
|
|
|
|
// FIXME: see if alignment helps.
|
|
|
|
for (unsigned I = 0;; ++I) {
|
|
|
|
for (auto &&B : Used) {
|
|
|
|
unsigned Byte = 0;
|
|
|
|
while ((I + Byte) < B.size() && Byte < (Size / 8)) {
|
|
|
|
if (B[I + Byte])
|
|
|
|
goto NextI;
|
|
|
|
++Byte;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return (MinByte + I) * 8;
|
|
|
|
NextI:;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Record that a BitWidth-bit return value slot has been allocated at byte
// AllocBefore in the region preceding each target's vtable address point.
// OffsetByte/OffsetBit receive the position of the slot relative to the
// address point (OffsetByte is negative: the slot lives before the vtable).
void wholeprogramdevirt::setBeforeReturnValues(
    MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocBefore,
    unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
  const bool IsSingleBit = BitWidth == 1;
  const uint64_t NumBytes = (BitWidth + 7) / 8;

  // A 1-bit value shares the byte containing bit AllocBefore; wider values
  // occupy a fresh byte-aligned region of NumBytes bytes.
  OffsetByte = IsSingleBit ? -(AllocBefore / 8 + 1)
                           : -((AllocBefore + 7) / 8 + NumBytes);
  OffsetBit = AllocBefore % 8;

  for (VirtualCallTarget &Target : Targets)
    if (IsSingleBit)
      Target.setBeforeBit(AllocBefore);
    else
      Target.setBeforeBytes(AllocBefore, NumBytes);
}
|
|
|
|
|
|
|
|
// Record that a BitWidth-bit return value slot has been allocated at byte
// AllocAfter in the region following each target's vtable. OffsetByte and
// OffsetBit receive the slot's position relative to the address point
// (positive: the slot lives after the vtable).
void wholeprogramdevirt::setAfterReturnValues(
    MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocAfter,
    unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
  const bool IsSingleBit = BitWidth == 1;
  const uint64_t NumBytes = (BitWidth + 7) / 8;

  // A 1-bit value shares the byte containing bit AllocAfter; wider values
  // start at the next byte boundary.
  OffsetByte = IsSingleBit ? AllocAfter / 8 : (AllocAfter + 7) / 8;
  OffsetBit = AllocAfter % 8;

  for (VirtualCallTarget &Target : Targets)
    if (IsSingleBit)
      Target.setAfterBit(AllocAfter);
    else
      Target.setAfterBytes(AllocAfter, NumBytes);
}
|
|
|
|
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
// Build a devirtualization candidate: implementation Fn, reached through
// type member TM. Endianness is taken from Fn's module data layout;
// WasDevirt starts false until the pass actually devirtualizes a call to it.
VirtualCallTarget::VirtualCallTarget(Function *Fn, const TypeMemberInfo *TM)
    : Fn(Fn), TM(TM),
      IsBigEndian(Fn->getParent()->getDataLayout().isBigEndian()),
      WasDevirt(false) {}
2016-02-10 06:50:34 +08:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
// A slot in a set of virtual tables. The TypeID identifies the set of virtual
|
2016-02-10 06:50:34 +08:00
|
|
|
// tables, and the ByteOffset is the offset in bytes from the address point to
|
|
|
|
// the virtual function pointer.
|
|
|
|
struct VTableSlot {
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
Metadata *TypeID;
|
2016-02-10 06:50:34 +08:00
|
|
|
uint64_t ByteOffset;
|
|
|
|
};
|
|
|
|
|
2016-08-12 01:20:18 +08:00
|
|
|
} // end anonymous namespace
|
2016-02-10 06:50:34 +08:00
|
|
|
|
2016-02-10 07:01:38 +08:00
|
|
|
namespace llvm {
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
template <> struct DenseMapInfo<VTableSlot> {
|
|
|
|
static VTableSlot getEmptyKey() {
|
|
|
|
return {DenseMapInfo<Metadata *>::getEmptyKey(),
|
|
|
|
DenseMapInfo<uint64_t>::getEmptyKey()};
|
|
|
|
}
|
|
|
|
static VTableSlot getTombstoneKey() {
|
|
|
|
return {DenseMapInfo<Metadata *>::getTombstoneKey(),
|
|
|
|
DenseMapInfo<uint64_t>::getTombstoneKey()};
|
|
|
|
}
|
|
|
|
static unsigned getHashValue(const VTableSlot &I) {
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
return DenseMapInfo<Metadata *>::getHashValue(I.TypeID) ^
|
2016-02-10 06:50:34 +08:00
|
|
|
DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
|
|
|
|
}
|
|
|
|
static bool isEqual(const VTableSlot &LHS,
|
|
|
|
const VTableSlot &RHS) {
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
|
2016-02-10 06:50:34 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2019-08-02 21:10:52 +08:00
|
|
|
template <> struct DenseMapInfo<VTableSlotSummary> {
|
|
|
|
static VTableSlotSummary getEmptyKey() {
|
|
|
|
return {DenseMapInfo<StringRef>::getEmptyKey(),
|
|
|
|
DenseMapInfo<uint64_t>::getEmptyKey()};
|
|
|
|
}
|
|
|
|
static VTableSlotSummary getTombstoneKey() {
|
|
|
|
return {DenseMapInfo<StringRef>::getTombstoneKey(),
|
|
|
|
DenseMapInfo<uint64_t>::getTombstoneKey()};
|
|
|
|
}
|
|
|
|
static unsigned getHashValue(const VTableSlotSummary &I) {
|
|
|
|
return DenseMapInfo<StringRef>::getHashValue(I.TypeID) ^
|
|
|
|
DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
|
|
|
|
}
|
|
|
|
static bool isEqual(const VTableSlotSummary &LHS,
|
|
|
|
const VTableSlotSummary &RHS) {
|
|
|
|
return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2016-08-12 01:20:18 +08:00
|
|
|
} // end namespace llvm
|
2016-02-10 07:01:38 +08:00
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// A virtual call site. VTable is the loaded virtual table pointer, and CS is
|
|
|
|
// the indirect virtual call.
|
|
|
|
struct VirtualCallSite {
|
|
|
|
Value *VTable;
|
|
|
|
CallSite CS;
|
|
|
|
|
2016-06-25 08:23:04 +08:00
|
|
|
// If non-null, this field points to the associated unsafe use count stored in
|
|
|
|
// the DevirtModule::NumUnsafeUsesForTypeTest map below. See the description
|
|
|
|
// of that field for details.
|
|
|
|
unsigned *NumUnsafeUses;
|
|
|
|
|
2017-08-22 00:57:21 +08:00
|
|
|
void
|
|
|
|
emitRemark(const StringRef OptName, const StringRef TargetName,
|
|
|
|
function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter) {
|
2016-07-12 10:38:37 +08:00
|
|
|
Function *F = CS.getCaller();
|
2017-08-22 00:57:21 +08:00
|
|
|
DebugLoc DLoc = CS->getDebugLoc();
|
|
|
|
BasicBlock *Block = CS.getParent();
|
|
|
|
|
|
|
|
using namespace ore;
|
2018-01-05 08:27:51 +08:00
|
|
|
OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, OptName, DLoc, Block)
|
|
|
|
<< NV("Optimization", OptName)
|
|
|
|
<< ": devirtualized a call to "
|
|
|
|
<< NV("FunctionName", TargetName));
|
2016-07-12 10:38:37 +08:00
|
|
|
}
|
|
|
|
|
2017-08-22 00:57:21 +08:00
|
|
|
void replaceAndErase(
|
|
|
|
const StringRef OptName, const StringRef TargetName, bool RemarksEnabled,
|
|
|
|
function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
|
|
|
|
Value *New) {
|
2016-08-12 03:09:02 +08:00
|
|
|
if (RemarksEnabled)
|
2017-08-22 00:57:21 +08:00
|
|
|
emitRemark(OptName, TargetName, OREGetter);
|
2016-02-10 06:50:34 +08:00
|
|
|
CS->replaceAllUsesWith(New);
|
|
|
|
if (auto II = dyn_cast<InvokeInst>(CS.getInstruction())) {
|
|
|
|
BranchInst::Create(II->getNormalDest(), CS.getInstruction());
|
|
|
|
II->getUnwindDest()->removePredecessor(II->getParent());
|
|
|
|
}
|
|
|
|
CS->eraseFromParent();
|
2016-06-25 08:23:04 +08:00
|
|
|
// This use is no longer unsafe.
|
|
|
|
if (NumUnsafeUses)
|
|
|
|
--*NumUnsafeUses;
|
2016-02-10 06:50:34 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2017-02-16 05:56:51 +08:00
|
|
|
// Call site information collected for a specific VTableSlot and possibly a list
// of constant integer arguments. The grouping by arguments is handled by the
// VTableSlotInfo class.
struct CallSiteInfo {
  /// The set of call sites for this slot. Used during regular LTO and the
  /// import phase of ThinLTO (as well as the export phase of ThinLTO for any
  /// call sites that appear in the merged module itself); in each of these
  /// cases we are directly operating on the call sites at the IR level.
  std::vector<VirtualCallSite> CallSites;

  /// Whether all call sites represented by this CallSiteInfo, including those
  /// in summaries, have been devirtualized. This starts off as true because a
  /// default constructed CallSiteInfo represents no call sites.
  bool AllCallSitesDevirted = true;

  // These fields are used during the export phase of ThinLTO and reflect
  // information collected from function summaries.

  /// Whether any function summary contains an llvm.assume(llvm.type.test) for
  /// this slot.
  bool SummaryHasTypeTestAssumeUsers = false;

  /// CFI-specific: a vector containing the list of function summaries that use
  /// the llvm.type.checked.load intrinsic and therefore will require
  /// resolutions for llvm.type.test in order to implement CFI checks if
  /// devirtualization was unsuccessful. If devirtualization was successful, the
  /// pass will clear this vector by calling markDevirt(). If at the end of the
  /// pass the vector is non-empty, we will need to add a use of llvm.type.test
  /// to each of the function summaries in the vector.
  std::vector<FunctionSummary *> SummaryTypeCheckedLoadUsers;
  /// Summaries of functions observed (via llvm.assume(llvm.type.test)) to use
  /// this slot; tracked alongside SummaryHasTypeTestAssumeUsers.
  std::vector<FunctionSummary *> SummaryTypeTestAssumeUsers;

  /// True when any summary-level user exists, i.e. a resolution for this slot
  /// must be exported for other modules to consume.
  bool isExported() const {
    return SummaryHasTypeTestAssumeUsers ||
           !SummaryTypeCheckedLoadUsers.empty();
  }

  /// Record a summary-level llvm.type.checked.load user. Any such user means
  /// we can no longer claim that every call site has been devirtualized.
  void addSummaryTypeCheckedLoadUser(FunctionSummary *FS) {
    SummaryTypeCheckedLoadUsers.push_back(FS);
    AllCallSitesDevirted = false;
  }

  /// Record a summary-level llvm.assume(llvm.type.test) user.
  void addSummaryTypeTestAssumeUser(FunctionSummary *FS) {
    SummaryTypeTestAssumeUsers.push_back(FS);
    SummaryHasTypeTestAssumeUsers = true;
    AllCallSitesDevirted = false;
  }

  /// Mark all call sites represented here (IR-level and summary-level) as
  /// devirtualized.
  void markDevirt() {
    AllCallSitesDevirted = true;

    // As explained in the comment for SummaryTypeCheckedLoadUsers.
    SummaryTypeCheckedLoadUsers.clear();
  }
};
|
|
|
|
|
|
|
|
// Call site information collected for a specific VTableSlot.
struct VTableSlotInfo {
  // The set of call sites which do not have all constant integer arguments
  // (excluding "this").
  CallSiteInfo CSInfo;

  // The set of call sites with all constant integer arguments (excluding
  // "this"), grouped by argument list.
  std::map<std::vector<uint64_t>, CallSiteInfo> ConstCSInfo;

  // Record call site CS, whose "this" pointer was loaded from VTable, in the
  // bucket selected by findCallSiteInfo. NumUnsafeUses may be null; when
  // non-null it points at the unsafe-use counter for the guarding type test.
  void addCallSite(Value *VTable, CallSite CS, unsigned *NumUnsafeUses);

private:
  // Returns ConstCSInfo[Args] when CS has an integer return of at most 64
  // bits and every argument past "this" is such an integer constant;
  // otherwise returns the catch-all CSInfo bucket.
  CallSiteInfo &findCallSiteInfo(CallSite CS);
};
|
|
|
|
|
|
|
|
/// Select the CallSiteInfo bucket for CS. Virtual constant propagation can
/// only reason about call sites whose return type is an integer no wider than
/// 64 bits and whose non-"this" arguments are all such integer constants;
/// only those are grouped by argument list in ConstCSInfo. Everything else
/// lands in the catch-all CSInfo bucket.
CallSiteInfo &VTableSlotInfo::findCallSiteInfo(CallSite CS) {
  // The return value must be an integer of at most 64 bits, and there must be
  // at least one argument (the "this" pointer) for grouping to make sense.
  auto *RetTy = dyn_cast<IntegerType>(CS.getType());
  if (!RetTy || RetTy->getBitWidth() > 64 || CS.arg_empty())
    return CSInfo;

  // Collect the zero-extended values of all arguments past "this"; bail to
  // the catch-all bucket on the first non-constant or over-wide argument.
  std::vector<uint64_t> ConstArgs;
  for (auto &&Arg : make_range(CS.arg_begin() + 1, CS.arg_end())) {
    auto *ArgCI = dyn_cast<ConstantInt>(Arg);
    if (!ArgCI || ArgCI->getBitWidth() > 64)
      return CSInfo;
    ConstArgs.push_back(ArgCI->getZExtValue());
  }
  return ConstCSInfo[ConstArgs];
}
|
|
|
|
|
|
|
|
/// Record a virtual call site for this slot, bucketing it by its
/// constant-argument profile (see findCallSiteInfo).
void VTableSlotInfo::addCallSite(Value *VTable, CallSite CS,
                                 unsigned *NumUnsafeUses) {
  CallSiteInfo &Info = findCallSiteInfo(CS);
  // A freshly recorded call site has, by definition, not been devirtualized.
  Info.AllCallSitesDevirted = false;
  Info.CallSites.push_back({VTable, CS, NumUnsafeUses});
}
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
// Implements the IR-level whole-program devirtualization transform for one
// module. Collects virtual call sites into CallSlots, then applies the
// devirtualization strategies declared below (single-impl, branch funnels,
// uniform/unique return value, virtual constant propagation).
struct DevirtModule {
  Module &M;
  function_ref<AAResults &(Function &)> AARGetter;
  function_ref<DominatorTree &(Function &)> LookupDomTree;

  // At most one of these is non-null (asserted in the constructor): the pass
  // either exports resolutions into ExportSummary or applies resolutions read
  // from ImportSummary.
  ModuleSummaryIndex *ExportSummary;
  const ModuleSummaryIndex *ImportSummary;

  // Frequently used types, cached from the module's context/data layout.
  IntegerType *Int8Ty;
  PointerType *Int8PtrTy;
  IntegerType *Int32Ty;
  IntegerType *Int64Ty;
  IntegerType *IntPtrTy;

  bool RemarksEnabled;
  function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter;

  // All collected virtual call sites, keyed by vtable slot.
  MapVector<VTableSlot, VTableSlotInfo> CallSlots;

  // This map keeps track of the number of "unsafe" uses of a loaded function
  // pointer. The key is the associated llvm.type.test intrinsic call generated
  // by this pass. An unsafe use is one that calls the loaded function pointer
  // directly. Every time we eliminate an unsafe use (for example, by
  // devirtualizing it or by applying virtual constant propagation), we
  // decrement the value stored in this map. If a value reaches zero, we can
  // eliminate the type check by RAUWing the associated llvm.type.test call with
  // true.
  std::map<CallInst *, unsigned> NumUnsafeUsesForTypeTest;

  DevirtModule(Module &M, function_ref<AAResults &(Function &)> AARGetter,
               function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
               function_ref<DominatorTree &(Function &)> LookupDomTree,
               ModuleSummaryIndex *ExportSummary,
               const ModuleSummaryIndex *ImportSummary)
      : M(M), AARGetter(AARGetter), LookupDomTree(LookupDomTree),
        ExportSummary(ExportSummary), ImportSummary(ImportSummary),
        Int8Ty(Type::getInt8Ty(M.getContext())),
        Int8PtrTy(Type::getInt8PtrTy(M.getContext())),
        Int32Ty(Type::getInt32Ty(M.getContext())),
        Int64Ty(Type::getInt64Ty(M.getContext())),
        IntPtrTy(M.getDataLayout().getIntPtrType(M.getContext(), 0)),
        RemarksEnabled(areRemarksEnabled()), OREGetter(OREGetter) {
    // Export and import phases are mutually exclusive.
    assert(!(ExportSummary && ImportSummary));
  }

  bool areRemarksEnabled();

  // Gather call sites guarded by llvm.type.test / llvm.type.checked.load
  // intrinsics into CallSlots.
  void scanTypeTestUsers(Function *TypeTestFunc, Function *AssumeFunc);
  void scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc);

  void buildTypeIdentifierMap(
      std::vector<VTableBits> &Bits,
      DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);
  bool
  tryFindVirtualCallTargets(std::vector<VirtualCallTarget> &TargetsForSlot,
                            const std::set<TypeMemberInfo> &TypeMemberInfos,
                            uint64_t ByteOffset);

  // Each optimization has an apply* function (rewrites IR call sites) and a
  // try* function (decides applicability, records summary resolutions).
  void applySingleImplDevirt(VTableSlotInfo &SlotInfo, Constant *TheFn,
                             bool &IsExported);
  bool trySingleImplDevirt(ModuleSummaryIndex *ExportSummary,
                           MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                           VTableSlotInfo &SlotInfo,
                           WholeProgramDevirtResolution *Res);

  void applyICallBranchFunnel(VTableSlotInfo &SlotInfo, Constant *JT,
                              bool &IsExported);
  void tryICallBranchFunnel(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                            VTableSlotInfo &SlotInfo,
                            WholeProgramDevirtResolution *Res, VTableSlot Slot);

  bool tryEvaluateFunctionsWithArgs(
      MutableArrayRef<VirtualCallTarget> TargetsForSlot,
      ArrayRef<uint64_t> Args);

  void applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
                             uint64_t TheRetVal);
  bool tryUniformRetValOpt(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                           CallSiteInfo &CSInfo,
                           WholeProgramDevirtResolution::ByArg *Res);

  // Returns the global symbol name that is used to export information about the
  // given vtable slot and list of arguments.
  std::string getGlobalName(VTableSlot Slot, ArrayRef<uint64_t> Args,
                            StringRef Name);

  bool shouldExportConstantsAsAbsoluteSymbols();

  // This function is called during the export phase to create a symbol
  // definition containing information about the given vtable slot and list of
  // arguments.
  void exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name,
                    Constant *C);
  void exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name,
                      uint32_t Const, uint32_t &Storage);

  // This function is called during the import phase to create a reference to
  // the symbol definition created during the export phase.
  Constant *importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
                         StringRef Name);
  Constant *importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
                           StringRef Name, IntegerType *IntTy,
                           uint32_t Storage);

  Constant *getMemberAddr(const TypeMemberInfo *M);

  void applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName, bool IsOne,
                            Constant *UniqueMemberAddr);
  bool tryUniqueRetValOpt(unsigned BitWidth,
                          MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                          CallSiteInfo &CSInfo,
                          WholeProgramDevirtResolution::ByArg *Res,
                          VTableSlot Slot, ArrayRef<uint64_t> Args);

  void applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
                             Constant *Byte, Constant *Bit);
  bool tryVirtualConstProp(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
                           VTableSlotInfo &SlotInfo,
                           WholeProgramDevirtResolution *Res, VTableSlot Slot);

  void rebuildGlobal(VTableBits &B);

  // Apply the summary resolution for Slot to all virtual calls in SlotInfo.
  void importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo);

  // If we were able to eliminate all unsafe uses for a type checked load,
  // eliminate the associated type tests by replacing them with true.
  void removeRedundantTypeTests();

  bool run();

  // Lower the module using the action and summary passed as command line
  // arguments. For testing purposes only.
  static bool
  runForTesting(Module &M, function_ref<AAResults &(Function &)> AARGetter,
                function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
                function_ref<DominatorTree &(Function &)> LookupDomTree);
};
|
|
|
|
|
2019-08-02 21:10:52 +08:00
|
|
|
// Summary-index-based counterpart of DevirtModule: performs whole-program
// devirtualization purely on the ThinLTO combined summary, without IR.
struct DevirtIndex {
  ModuleSummaryIndex &ExportSummary;
  // The set in which to record GUIDs exported from their module by
  // devirtualization, used by client to ensure they are not internalized.
  std::set<GlobalValue::GUID> &ExportedGUIDs;
  // A map in which to record the information necessary to locate the WPD
  // resolution for local targets in case they are exported by cross module
  // importing.
  std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap;

  // Per-slot call site information, keyed by summary-level vtable slot.
  MapVector<VTableSlotSummary, VTableSlotInfo> CallSlots;

  DevirtIndex(
      ModuleSummaryIndex &ExportSummary,
      std::set<GlobalValue::GUID> &ExportedGUIDs,
      std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap)
      : ExportSummary(ExportSummary), ExportedGUIDs(ExportedGUIDs),
        LocalWPDTargetsMap(LocalWPDTargetsMap) {}

  // Populate TargetsForSlot with the call targets compatible with the given
  // type id info at ByteOffset; returns false if targets cannot be determined.
  bool tryFindVirtualCallTargets(std::vector<ValueInfo> &TargetsForSlot,
                                 const TypeIdCompatibleVtableInfo TIdInfo,
                                 uint64_t ByteOffset);

  // Attempt single-implementation devirtualization for the slot, recording
  // the resolution in Res and devirtualized targets in DevirtTargets.
  bool trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
                           VTableSlotSummary &SlotSummary,
                           VTableSlotInfo &SlotInfo,
                           WholeProgramDevirtResolution *Res,
                           std::set<ValueInfo> &DevirtTargets);

  // Entry point: run index-based devirtualization over ExportSummary.
  void run();
};
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
struct WholeProgramDevirt : public ModulePass {
|
|
|
|
static char ID;
|
2016-08-12 01:20:18 +08:00
|
|
|
|
2017-02-14 03:26:18 +08:00
|
|
|
bool UseCommandLine = false;
|
|
|
|
|
2017-03-23 02:22:59 +08:00
|
|
|
ModuleSummaryIndex *ExportSummary;
|
|
|
|
const ModuleSummaryIndex *ImportSummary;
|
2017-02-14 03:26:18 +08:00
|
|
|
|
|
|
|
WholeProgramDevirt() : ModulePass(ID), UseCommandLine(true) {
|
|
|
|
initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
2017-03-23 02:22:59 +08:00
|
|
|
WholeProgramDevirt(ModuleSummaryIndex *ExportSummary,
|
|
|
|
const ModuleSummaryIndex *ImportSummary)
|
|
|
|
: ModulePass(ID), ExportSummary(ExportSummary),
|
|
|
|
ImportSummary(ImportSummary) {
|
2016-02-10 06:50:34 +08:00
|
|
|
initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
2016-08-12 01:20:18 +08:00
|
|
|
|
|
|
|
bool runOnModule(Module &M) override {
|
2016-04-23 06:06:11 +08:00
|
|
|
if (skipModule(M))
|
|
|
|
return false;
|
2017-08-22 00:57:21 +08:00
|
|
|
|
2018-01-05 08:27:51 +08:00
|
|
|
// In the new pass manager, we can request the optimization
|
|
|
|
// remark emitter pass on a per-function-basis, which the
|
|
|
|
// OREGetter will do for us.
|
|
|
|
// In the old pass manager, this is harder, so we just build
|
|
|
|
// an optimization remark emitter on the fly, when we need it.
|
|
|
|
std::unique_ptr<OptimizationRemarkEmitter> ORE;
|
|
|
|
auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & {
|
2019-08-15 23:54:37 +08:00
|
|
|
ORE = std::make_unique<OptimizationRemarkEmitter>(F);
|
2018-01-05 08:27:51 +08:00
|
|
|
return *ORE;
|
|
|
|
};
|
2017-08-22 00:57:21 +08:00
|
|
|
|
2018-09-27 22:55:32 +08:00
|
|
|
auto LookupDomTree = [this](Function &F) -> DominatorTree & {
|
|
|
|
return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
|
|
|
|
};
|
|
|
|
|
2017-02-14 03:26:18 +08:00
|
|
|
if (UseCommandLine)
|
2018-09-27 22:55:32 +08:00
|
|
|
return DevirtModule::runForTesting(M, LegacyAARGetter(*this), OREGetter,
|
|
|
|
LookupDomTree);
|
2017-08-22 00:57:21 +08:00
|
|
|
|
2018-09-27 22:55:32 +08:00
|
|
|
return DevirtModule(M, LegacyAARGetter(*this), OREGetter, LookupDomTree,
|
|
|
|
ExportSummary, ImportSummary)
|
2017-03-23 02:22:59 +08:00
|
|
|
.run();
|
2017-02-18 02:17:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
|
|
|
AU.addRequired<AssumptionCacheTracker>();
|
|
|
|
AU.addRequired<TargetLibraryInfoWrapperPass>();
|
2018-09-27 22:55:32 +08:00
|
|
|
AU.addRequired<DominatorTreeWrapperPass>();
|
2016-04-23 06:06:11 +08:00
|
|
|
}
|
2016-02-10 06:50:34 +08:00
|
|
|
};
|
|
|
|
|
2016-08-12 01:20:18 +08:00
|
|
|
} // end anonymous namespace
|
2016-02-10 06:50:34 +08:00
|
|
|
|
2017-02-18 02:17:04 +08:00
|
|
|
// Register the legacy pass and its required analyses with the PassRegistry;
// the string "wholeprogramdevirt" is the opt command-line name.
INITIALIZE_PASS_BEGIN(WholeProgramDevirt, "wholeprogramdevirt",
                      "Whole program devirtualization", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(WholeProgramDevirt, "wholeprogramdevirt",
                    "Whole program devirtualization", false, false)

// Pass identity; its address (not value) identifies the pass.
char WholeProgramDevirt::ID = 0;
|
|
|
|
|
2017-03-23 02:22:59 +08:00
|
|
|
/// Public factory for the legacy WholeProgramDevirt pass. At most one of
/// \p ExportSummary and \p ImportSummary may be non-null (asserted by
/// DevirtModule's constructor when the pass runs).
ModulePass *
llvm::createWholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary,
                                   const ModuleSummaryIndex *ImportSummary) {
  return new WholeProgramDevirt(ExportSummary, ImportSummary);
}
|
|
|
|
|
2016-06-17 08:11:01 +08:00
|
|
|
/// New pass manager entry point: wires the function-level analysis getters
/// out of the module proxy and delegates to DevirtModule.
PreservedAnalyses WholeProgramDevirtPass::run(Module &M,
                                              ModuleAnalysisManager &AM) {
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  // Thin adapters giving DevirtModule per-function access to alias analysis,
  // remark emission, and dominator trees.
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };
  auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
  };
  auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
    return FAM.getResult<DominatorTreeAnalysis>(F);
  };

  bool Changed = DevirtModule(M, AARGetter, OREGetter, LookupDomTree,
                              ExportSummary, ImportSummary)
                     .run();
  // Conservatively invalidate everything if the module was modified.
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
|
|
|
|
|
2019-08-02 21:10:52 +08:00
|
|
|
namespace llvm {
|
|
|
|
/// Run summary-index-based whole program devirtualization over \p Summary,
/// recording exported GUIDs and local-target WPD locations for the client.
void runWholeProgramDevirtOnIndex(
    ModuleSummaryIndex &Summary, std::set<GlobalValue::GUID> &ExportedGUIDs,
    std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) {
  DevirtIndex Devirtualizer(Summary, ExportedGUIDs, LocalWPDTargetsMap);
  Devirtualizer.run();
}
|
|
|
|
|
|
|
|
void updateIndexWPDForExports(
|
|
|
|
ModuleSummaryIndex &Summary,
|
2019-10-03 00:36:59 +08:00
|
|
|
function_ref<bool(StringRef, GlobalValue::GUID)> isExported,
|
2019-08-02 21:10:52 +08:00
|
|
|
std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) {
|
|
|
|
for (auto &T : LocalWPDTargetsMap) {
|
|
|
|
auto &VI = T.first;
|
|
|
|
// This was enforced earlier during trySingleImplDevirt.
|
|
|
|
assert(VI.getSummaryList().size() == 1 &&
|
|
|
|
"Devirt of local target has more than one copy");
|
|
|
|
auto &S = VI.getSummaryList()[0];
|
2019-10-03 00:36:59 +08:00
|
|
|
if (!isExported(S->modulePath(), VI.getGUID()))
|
2019-08-02 21:10:52 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
// It's been exported by a cross module import.
|
|
|
|
for (auto &SlotSummary : T.second) {
|
|
|
|
auto *TIdSum = Summary.getTypeIdSummary(SlotSummary.TypeID);
|
|
|
|
assert(TIdSum);
|
|
|
|
auto WPDRes = TIdSum->WPDRes.find(SlotSummary.ByteOffset);
|
|
|
|
assert(WPDRes != TIdSum->WPDRes.end());
|
|
|
|
WPDRes->second.SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal(
|
|
|
|
WPDRes->second.SingleImplName,
|
|
|
|
Summary.getModuleHash(S->modulePath()));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
} // end namespace llvm
|
|
|
|
|
2017-02-18 02:17:04 +08:00
|
|
|
bool DevirtModule::runForTesting(
|
2017-08-22 00:57:21 +08:00
|
|
|
Module &M, function_ref<AAResults &(Function &)> AARGetter,
|
2018-09-27 22:55:32 +08:00
|
|
|
function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
|
|
|
|
function_ref<DominatorTree &(Function &)> LookupDomTree) {
|
2018-06-07 06:22:01 +08:00
|
|
|
ModuleSummaryIndex Summary(/*HaveGVs=*/false);
|
2017-02-14 03:26:18 +08:00
|
|
|
|
|
|
|
// Handle the command-line summary arguments. This code is for testing
|
|
|
|
// purposes only, so we handle errors directly.
|
|
|
|
if (!ClReadSummary.empty()) {
|
|
|
|
ExitOnError ExitOnErr("-wholeprogramdevirt-read-summary: " + ClReadSummary +
|
|
|
|
": ");
|
|
|
|
auto ReadSummaryFile =
|
|
|
|
ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
|
|
|
|
|
|
|
|
yaml::Input In(ReadSummaryFile->getBuffer());
|
|
|
|
In >> Summary;
|
|
|
|
ExitOnErr(errorCodeToError(In.error()));
|
|
|
|
}
|
|
|
|
|
2017-03-23 02:22:59 +08:00
|
|
|
bool Changed =
|
|
|
|
DevirtModule(
|
2018-09-27 22:55:32 +08:00
|
|
|
M, AARGetter, OREGetter, LookupDomTree,
|
2017-03-23 02:22:59 +08:00
|
|
|
ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
|
|
|
|
ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr)
|
|
|
|
.run();
|
2017-02-14 03:26:18 +08:00
|
|
|
|
|
|
|
if (!ClWriteSummary.empty()) {
|
|
|
|
ExitOnError ExitOnErr(
|
|
|
|
"-wholeprogramdevirt-write-summary: " + ClWriteSummary + ": ");
|
|
|
|
std::error_code EC;
|
2019-08-05 13:43:48 +08:00
|
|
|
raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_Text);
|
2017-02-14 03:26:18 +08:00
|
|
|
ExitOnErr(errorCodeToError(EC));
|
|
|
|
|
|
|
|
yaml::Output Out(OS);
|
|
|
|
Out << Summary;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
/// Scan all vtable globals in the module and build, for each type identifier
/// found in !type metadata, the set of (vtable, byte offset) members.
/// \p Bits receives one VTableBits entry per vtable global; \p TypeIdMap maps
/// each type id to TypeMemberInfo entries pointing into Bits.
void DevirtModule::buildTypeIdentifierMap(
    std::vector<VTableBits> &Bits,
    DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) {
  DenseMap<GlobalVariable *, VTableBits *> GVToBits;
  // Reserving up front is required for correctness, not just speed: pointers
  // to elements of Bits are stored in GVToBits and TypeIdMap below, so the
  // vector must never reallocate while we append.
  Bits.reserve(M.getGlobalList().size());
  SmallVector<MDNode *, 2> Types;
  for (GlobalVariable &GV : M.globals()) {
    Types.clear();
    GV.getMetadata(LLVMContext::MD_type, Types);
    // Only defined globals carrying !type metadata are vtables of interest.
    if (GV.isDeclaration() || Types.empty())
      continue;

    // Create at most one VTableBits per global, even with multiple !type
    // entries.
    VTableBits *&BitsPtr = GVToBits[&GV];
    if (!BitsPtr) {
      Bits.emplace_back();
      Bits.back().GV = &GV;
      Bits.back().ObjectSize =
          M.getDataLayout().getTypeAllocSize(GV.getInitializer()->getType());
      BitsPtr = &Bits.back();
    }

    for (MDNode *Type : Types) {
      // Operand 1 is the type identifier; operand 0 is the byte offset of the
      // address point within the global.
      auto TypeID = Type->getOperand(1).get();

      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();

      TypeIdMap[TypeID].insert({BitsPtr, Offset});
    }
  }
}
|
|
|
|
|
|
|
|
bool DevirtModule::tryFindVirtualCallTargets(
|
|
|
|
std::vector<VirtualCallTarget> &TargetsForSlot,
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
const std::set<TypeMemberInfo> &TypeMemberInfos, uint64_t ByteOffset) {
|
|
|
|
for (const TypeMemberInfo &TM : TypeMemberInfos) {
|
|
|
|
if (!TM.Bits->GV->isConstant())
|
2016-02-10 06:50:34 +08:00
|
|
|
return false;
|
|
|
|
|
2016-12-09 09:10:11 +08:00
|
|
|
Constant *Ptr = getPointerAtOffset(TM.Bits->GV->getInitializer(),
|
Reland: Dead Virtual Function Elimination
Remove dead virtual functions from vtables with
replaceNonMetadataUsesWith, so that CGProfile metadata gets cleaned up
correctly.
Original commit message:
Currently, it is hard for the compiler to remove unused C++ virtual
functions, because they are all referenced from vtables, which are referenced
by constructors. This means that if the constructor is called from any live
code, then we keep every virtual function in the final link, even if there
are no call sites which can use it.
This patch allows unused virtual functions to be removed during LTO (and
regular compilation in limited circumstances) by using type metadata to match
virtual function call sites to the vtable slots they might load from. This
information can then be used in the global dead code elimination pass instead
of the references from vtables to virtual functions, to more accurately
determine which functions are reachable.
To make this transformation safe, I have changed clang's code-generation to
always load virtual function pointers using the llvm.type.checked.load
intrinsic, instead of regular load instructions. I originally tried writing
this using clang's existing code-generation, which uses the llvm.type.test
and llvm.assume intrinsics after doing a normal load. However, it is possible
for optimisations to obscure the relationship between the GEP, load and
llvm.type.test, causing GlobalDCE to fail to find virtual function call
sites.
The existing linkage and visibility types don't accurately describe the scope
in which a virtual call could be made which uses a given vtable. This is
wider than the visibility of the type itself, because a virtual function call
could be made using a more-visible base class. I've added a new
!vcall_visibility metadata type to represent this, described in
TypeMetadata.rst. The internalization pass and libLTO have been updated to
change this metadata when linking is performed.
This doesn't currently work with ThinLTO, because it needs to see every call
to llvm.type.checked.load in the linkage unit. It might be possible to
extend this optimisation to be able to use the ThinLTO summary, as was done
for devirtualization, but until then that combination is rejected in the
clang driver.
To test this, I've written a fuzzer which generates random C++ programs with
complex class inheritance graphs, and virtual functions called through object
and function pointers of different types. The programs are spread across
multiple translation units and DSOs to test the different visibility
restrictions.
I've also tried doing bootstrap builds of LLVM to test this. This isn't
ideal, because only classes in anonymous namespaces can be optimised with
-fvisibility=default, and some parts of LLVM (plugins and bugpoint) do not
work correctly with -fvisibility=hidden. However, there are only 12 test
failures when building with -fvisibility=hidden (and an unmodified compiler),
and this change does not cause any new failures for either value of
-fvisibility.
On the 7 C++ sub-benchmarks of SPEC2006, this gives a geomean code-size
reduction of ~6%, over a baseline compiled with "-O2 -flto
-fvisibility=hidden -fwhole-program-vtables". The best cases are reductions
of ~14% in 450.soplex and 483.xalancbmk, and there are no code size
increases.
I've also run this on a set of 8 mbed-os examples compiled for Armv7M, which
show a geomean size reduction of ~3%, again with no size increases.
I had hoped that this would have no effect on performance, which would allow
it to awlays be enabled (when using -fwhole-program-vtables). However, the
changes in clang to use the llvm.type.checked.load intrinsic are causing ~1%
performance regression in the C++ parts of SPEC2006. It should be possible to
recover some of this perf loss by teaching optimisations about the
llvm.type.checked.load intrinsic, which would make it worth turning this on
by default (though it's still dependent on -fwhole-program-vtables).
Differential revision: https://reviews.llvm.org/D63932
llvm-svn: 375094
2019-10-17 17:58:57 +08:00
|
|
|
TM.Offset + ByteOffset, M);
|
2016-12-09 09:10:11 +08:00
|
|
|
if (!Ptr)
|
2016-02-10 06:50:34 +08:00
|
|
|
return false;
|
|
|
|
|
2016-12-09 09:10:11 +08:00
|
|
|
auto Fn = dyn_cast<Function>(Ptr->stripPointerCasts());
|
2016-02-10 06:50:34 +08:00
|
|
|
if (!Fn)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// We can disregard __cxa_pure_virtual as a possible call target, as
|
|
|
|
// calls to pure virtuals are UB.
|
|
|
|
if (Fn->getName() == "__cxa_pure_virtual")
|
|
|
|
continue;
|
|
|
|
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
TargetsForSlot.push_back({Fn, &TM});
|
2016-02-10 06:50:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Give up if we couldn't find any targets.
|
|
|
|
return !TargetsForSlot.empty();
|
|
|
|
}
|
|
|
|
|
2019-08-02 21:10:52 +08:00
|
|
|
bool DevirtIndex::tryFindVirtualCallTargets(
|
|
|
|
std::vector<ValueInfo> &TargetsForSlot, const TypeIdCompatibleVtableInfo TIdInfo,
|
|
|
|
uint64_t ByteOffset) {
|
|
|
|
for (const TypeIdOffsetVtableInfo P : TIdInfo) {
|
[ThinLTO/WPD] Fix index-based WPD for available_externally vtables
Summary:
Clang does not add type metadata to available_externally vtables. When
choosing a summary to look at for virtual function definitions, make
sure we skip summaries for any available externally vtables as they will
not describe any virtual function functions, which are only summarized
in the presence of type metadata on the vtable def. Simply look for the
corresponding strong def's summary.
Also add handling for same-named local vtables with the same GUID
because of same-named files without enough distinguishing path.
In that case we return a conservative result with no devirtualization.
Reviewers: pcc, davidxl, evgeny777
Subscribers: mehdi_amini, inglorion, hiraditya, steven_wu, dexonsmith, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D69452
2019-10-26 05:56:12 +08:00
|
|
|
// Ensure that we have at most one external linkage vtable initializer.
|
2019-08-02 21:10:52 +08:00
|
|
|
assert(P.VTableVI.getSummaryList().size() == 1 ||
|
[ThinLTO/WPD] Fix index-based WPD for available_externally vtables
Summary:
Clang does not add type metadata to available_externally vtables. When
choosing a summary to look at for virtual function definitions, make
sure we skip summaries for any available externally vtables as they will
not describe any virtual function functions, which are only summarized
in the presence of type metadata on the vtable def. Simply look for the
corresponding strong def's summary.
Also add handling for same-named local vtables with the same GUID
because of same-named files without enough distinguishing path.
In that case we return a conservative result with no devirtualization.
Reviewers: pcc, davidxl, evgeny777
Subscribers: mehdi_amini, inglorion, hiraditya, steven_wu, dexonsmith, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D69452
2019-10-26 05:56:12 +08:00
|
|
|
llvm::count_if(
|
2019-08-02 21:10:52 +08:00
|
|
|
P.VTableVI.getSummaryList(),
|
|
|
|
[&](const std::unique_ptr<GlobalValueSummary> &Summary) {
|
[ThinLTO/WPD] Fix index-based WPD for available_externally vtables
Summary:
Clang does not add type metadata to available_externally vtables. When
choosing a summary to look at for virtual function definitions, make
sure we skip summaries for any available externally vtables as they will
not describe any virtual function functions, which are only summarized
in the presence of type metadata on the vtable def. Simply look for the
corresponding strong def's summary.
Also add handling for same-named local vtables with the same GUID
because of same-named files without enough distinguishing path.
In that case we return a conservative result with no devirtualization.
Reviewers: pcc, davidxl, evgeny777
Subscribers: mehdi_amini, inglorion, hiraditya, steven_wu, dexonsmith, arphaman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D69452
2019-10-26 05:56:12 +08:00
|
|
|
return GlobalValue::isExternalLinkage(Summary->linkage());
|
|
|
|
}) <= 1);
|
|
|
|
// Find the first non-available_externally linkage vtable initializer.
|
|
|
|
// We can have multiple available_externally, linkonce_odr and weak_odr
|
|
|
|
// vtable initializers, however we want to skip available_externally as they
|
|
|
|
// do not have type metadata attached, and therefore the summary will not
|
|
|
|
// contain any vtable functions.
|
|
|
|
//
|
|
|
|
// Also, handle the case of same-named local Vtables with the same path
|
|
|
|
// and therefore the same GUID. This can happen if there isn't enough
|
|
|
|
// distinguishing path when compiling the source file. In that case we
|
|
|
|
// conservatively return false early.
|
|
|
|
const GlobalVarSummary *VS = nullptr;
|
|
|
|
bool LocalFound = false;
|
|
|
|
for (auto &S : P.VTableVI.getSummaryList()) {
|
|
|
|
if (GlobalValue::isLocalLinkage(S->linkage())) {
|
|
|
|
if (LocalFound)
|
|
|
|
return false;
|
|
|
|
LocalFound = true;
|
|
|
|
}
|
|
|
|
if (!GlobalValue::isAvailableExternallyLinkage(S->linkage()))
|
|
|
|
VS = cast<GlobalVarSummary>(S.get());
|
|
|
|
}
|
|
|
|
if (!VS->isLive())
|
2019-08-02 21:10:52 +08:00
|
|
|
continue;
|
|
|
|
for (auto VTP : VS->vTableFuncs()) {
|
|
|
|
if (VTP.VTableOffset != P.AddressPointOffset + ByteOffset)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
TargetsForSlot.push_back(VTP.FuncVI);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Give up if we couldn't find any targets.
|
|
|
|
return !TargetsForSlot.empty();
|
|
|
|
}
|
|
|
|
|
2017-02-16 05:56:51 +08:00
|
|
|
void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo,
|
2017-03-04 09:31:01 +08:00
|
|
|
Constant *TheFn, bool &IsExported) {
|
2017-02-16 05:56:51 +08:00
|
|
|
auto Apply = [&](CallSiteInfo &CSInfo) {
|
|
|
|
for (auto &&VCallSite : CSInfo.CallSites) {
|
|
|
|
if (RemarksEnabled)
|
2018-08-14 11:00:16 +08:00
|
|
|
VCallSite.emitRemark("single-impl",
|
|
|
|
TheFn->stripPointerCasts()->getName(), OREGetter);
|
2017-02-16 05:56:51 +08:00
|
|
|
VCallSite.CS.setCalledFunction(ConstantExpr::getBitCast(
|
|
|
|
TheFn, VCallSite.CS.getCalledValue()->getType()));
|
|
|
|
// This use is no longer unsafe.
|
|
|
|
if (VCallSite.NumUnsafeUses)
|
|
|
|
--*VCallSite.NumUnsafeUses;
|
|
|
|
}
|
2018-03-10 03:11:44 +08:00
|
|
|
if (CSInfo.isExported())
|
2017-03-04 09:31:01 +08:00
|
|
|
IsExported = true;
|
2018-03-10 03:11:44 +08:00
|
|
|
CSInfo.markDevirt();
|
2017-02-16 05:56:51 +08:00
|
|
|
};
|
|
|
|
Apply(SlotInfo.CSInfo);
|
|
|
|
for (auto &P : SlotInfo.ConstCSInfo)
|
|
|
|
Apply(P.second);
|
|
|
|
}
|
|
|
|
|
2019-10-17 15:46:18 +08:00
|
|
|
// Add direct-call edges from every summary-level user of this slot to Callee,
// so the devirtualized target becomes eligible for ThinLTO importing.
// Returns true if any edge crosses a module boundary (callee is exported).
static bool AddCalls(VTableSlotInfo &SlotInfo, const ValueInfo &Callee) {
  // We can't add calls if we haven't seen a definition
  if (Callee.getSummaryList().empty())
    return false;

  // Insert calls into the summary index so that the devirtualized targets
  // are eligible for import.
  // FIXME: Annotate type tests with hotness. For now, mark these as hot
  // to better ensure we have the opportunity to inline them.
  bool IsExported = false;
  auto &CalleeSummary = Callee.getSummaryList()[0];
  CalleeInfo CI(CalleeInfo::HotnessType::Hot, /* RelBF = */ 0);

  // Record one call edge from FS to Callee and note whether it leaves the
  // callee's defining module.
  auto RecordEdge = [&](auto *FS) {
    FS->addCall({Callee, CI});
    IsExported |= CalleeSummary->modulePath() != FS->modulePath();
  };
  // Both kinds of summary users (checked-load and test/assume) get edges.
  auto RecordEdgesFor = [&](CallSiteInfo &CSInfo) {
    for (auto *FS : CSInfo.SummaryTypeCheckedLoadUsers)
      RecordEdge(FS);
    for (auto *FS : CSInfo.SummaryTypeTestAssumeUsers)
      RecordEdge(FS);
  };

  RecordEdgesFor(SlotInfo.CSInfo);
  for (auto &ConstEntry : SlotInfo.ConstCSInfo)
    RecordEdgesFor(ConstEntry.second);
  return IsExported;
}
|
|
|
|
|
2017-02-15 10:13:08 +08:00
|
|
|
/// Attempt the single-implementation devirtualization: if every possible
/// target of this slot is the same function, rewrite all call sites to call
/// it directly.
///
/// \param ExportSummary  Combined index; used to add summary call edges for
///                       the devirtualized target. Assumed non-null here —
///                       TODO confirm callers guarantee this.
/// \param TargetsForSlot Candidate targets (must be non-empty).
/// \param SlotInfo       Call sites to rewrite.
/// \param Res            Resolution to record for ThinLTO import, filled in
///                       only when the devirtualization is exported.
/// \returns true iff an exported resolution was recorded. Note: the call
///          sites are rewritten even when this returns false; false only
///          means no resolution needs to be exported.
bool DevirtModule::trySingleImplDevirt(
    ModuleSummaryIndex *ExportSummary,
    MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
    WholeProgramDevirtResolution *Res) {
  // See if the program contains a single implementation of this virtual
  // function.
  Function *TheFn = TargetsForSlot[0].Fn;
  for (auto &&Target : TargetsForSlot)
    if (TheFn != Target.Fn)
      return false;

  // If so, update each call site to call that implementation directly.
  if (RemarksEnabled)
    TargetsForSlot[0].WasDevirt = true;

  bool IsExported = false;
  applySingleImplDevirt(SlotInfo, TheFn, IsExported);
  // If no call-site group is visible outside this module, nothing needs to be
  // recorded in the summary resolution.
  if (!IsExported)
    return false;

  // If the only implementation has local linkage, we must promote to external
  // to make it visible to thin LTO objects. We can only get here during the
  // ThinLTO export phase.
  if (TheFn->hasLocalLinkage()) {
    std::string NewName = (TheFn->getName() + "$merged").str();

    // Since we are renaming the function, any comdats with the same name must
    // also be renamed. This is required when targeting COFF, as the comdat name
    // must match one of the names of the symbols in the comdat.
    // NOTE: the comdat must be renamed before setName below so that the
    // replacement comdat picks up all members of the old one.
    if (Comdat *C = TheFn->getComdat()) {
      if (C->getName() == TheFn->getName()) {
        Comdat *NewC = M.getOrInsertComdat(NewName);
        NewC->setSelectionKind(C->getSelectionKind());
        for (GlobalObject &GO : M.global_objects())
          if (GO.getComdat() == C)
            GO.setComdat(NewC);
      }
    }

    // Promote: external linkage but hidden visibility, so the symbol does not
    // leak out of the LTO unit.
    TheFn->setLinkage(GlobalValue::ExternalLinkage);
    TheFn->setVisibility(GlobalValue::HiddenVisibility);
    TheFn->setName(NewName);
  }
  if (ValueInfo TheFnVI = ExportSummary->getValueInfo(TheFn->getGUID()))
    // Any needed promotion of 'TheFn' has already been done during
    // LTO unit split, so we can ignore return value of AddCalls.
    AddCalls(SlotInfo, TheFnVI);

  // Record the resolution (including any promoted name) for the import phase.
  Res->TheKind = WholeProgramDevirtResolution::SingleImpl;
  Res->SingleImplName = TheFn->getName();

  return true;
}
|
|
|
|
|
2019-08-02 21:10:52 +08:00
|
|
|
/// Summary-index (thin-link) variant of single-implementation
/// devirtualization: succeed when every candidate target is the same
/// ValueInfo, recording the resolution (and any promoted name for locals) so
/// the actual call-site rewriting can happen during the ThinLTO import phase.
///
/// \param DevirtTargets Collects devirtualized functions for statistics when
///                      -wholeprogramdevirt-print-index-based is on.
/// \returns true iff a SingleImpl resolution was recorded in *Res.
bool DevirtIndex::trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
                                      VTableSlotSummary &SlotSummary,
                                      VTableSlotInfo &SlotInfo,
                                      WholeProgramDevirtResolution *Res,
                                      std::set<ValueInfo> &DevirtTargets) {
  // See if the program contains a single implementation of this virtual
  // function.
  auto TheFn = TargetsForSlot[0];
  for (auto &&Target : TargetsForSlot)
    if (TheFn != Target)
      return false;

  // Don't devirtualize if we don't have target definition.
  auto Size = TheFn.getSummaryList().size();
  if (!Size)
    return false;

  // If the summary list contains multiple summaries where at least one is
  // a local, give up, as we won't know which (possibly promoted) name to use.
  for (auto &S : TheFn.getSummaryList())
    if (GlobalValue::isLocalLinkage(S->linkage()) && Size > 1)
      return false;

  // Collect functions devirtualized at least for one call site for stats.
  if (PrintSummaryDevirt)
    DevirtTargets.insert(TheFn);

  auto &S = TheFn.getSummaryList()[0];
  // Add summary call edges so the target is importable; exported means some
  // caller lives in a different module than the target's definition.
  bool IsExported = AddCalls(SlotInfo, TheFn);
  if (IsExported)
    ExportedGUIDs.insert(TheFn.getGUID());

  // Record in summary for use in devirtualization during the ThinLTO import
  // step.
  Res->TheKind = WholeProgramDevirtResolution::SingleImpl;
  if (GlobalValue::isLocalLinkage(S->linkage())) {
    if (IsExported)
      // If target is a local function and we are exporting it by
      // devirtualizing a call in another module, we need to record the
      // promoted name.
      Res->SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal(
          TheFn.name(), ExportSummary.getModuleHash(S->modulePath()));
    else {
      // Local target used only within its own module: remember the slot so a
      // later pass can patch the resolution if the local is promoted anyway.
      LocalWPDTargetsMap[TheFn].push_back(SlotSummary);
      Res->SingleImplName = TheFn.name();
    }
  } else
    Res->SingleImplName = TheFn.name();

  // Name will be empty if this thin link driven off of serialized combined
  // index (e.g. llvm-lto). However, WPD is not supported/invoked for the
  // legacy LTO API anyway.
  assert(!Res->SingleImplName.empty());

  return true;
}
|
|
|
|
|
2018-03-10 03:11:44 +08:00
|
|
|
/// Build an indirect-call branch funnel for this slot: a varargs thunk that
/// takes the vtable address in the 'nest' register plus the original call
/// arguments, and musttail-calls llvm.icall.branch.funnel with the
/// (vtable-address, target) pairs so the backend can emit a compare/branch
/// chain. Only emitted on x86_64 and only while some call sites for the slot
/// remain un-devirtualized (e.g. when the retpoline mitigation makes real
/// indirect calls expensive).
void DevirtModule::tryICallBranchFunnel(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
    WholeProgramDevirtResolution *Res, VTableSlot Slot) {
  // The funnel lowering is only implemented for x86_64.
  Triple T(M.getTargetTriple());
  if (T.getArch() != Triple::x86_64)
    return;

  // Past this many targets the compare chain is assumed unprofitable.
  if (TargetsForSlot.size() > ClThreshold)
    return;

  // Only build the funnel if at least one call-site group still has
  // non-devirtualized calls that would benefit.
  bool HasNonDevirt = !SlotInfo.CSInfo.AllCallSitesDevirted;
  if (!HasNonDevirt)
    for (auto &P : SlotInfo.ConstCSInfo)
      if (!P.second.AllCallSitesDevirted) {
        HasNonDevirt = true;
        break;
      }

  if (!HasNonDevirt)
    return;

  // The funnel is void(i8* nest, ...): the vtable pointer plus whatever
  // arguments the original call carried.
  FunctionType *FT =
      FunctionType::get(Type::getVoidTy(M.getContext()), {Int8PtrTy}, true);
  Function *JT;
  if (isa<MDString>(Slot.TypeID)) {
    // Named (string) type IDs get a stable, hidden external symbol so other
    // modules in the LTO unit can reference the same funnel.
    JT = Function::Create(FT, Function::ExternalLinkage,
                          M.getDataLayout().getProgramAddressSpace(),
                          getGlobalName(Slot, {}, "branch_funnel"), &M);
    JT->setVisibility(GlobalValue::HiddenVisibility);
  } else {
    // Anonymous type IDs are module-local.
    JT = Function::Create(FT, Function::InternalLinkage,
                          M.getDataLayout().getProgramAddressSpace(),
                          "branch_funnel", &M);
  }
  // Attribute index 1 is the first parameter: the vtable address travels in
  // the nest register.
  JT->addAttribute(1, Attribute::Nest);

  // Arguments to the intrinsic: the vtable pointer followed by
  // (member address, target function) pairs.
  std::vector<Value *> JTArgs;
  JTArgs.push_back(JT->arg_begin());
  for (auto &T : TargetsForSlot) {
    JTArgs.push_back(getMemberAddr(T.TM));
    JTArgs.push_back(T.Fn);
  }

  // Body is a single musttail call to the intrinsic followed by ret; the
  // backend expands it into the branch chain.
  BasicBlock *BB = BasicBlock::Create(M.getContext(), "", JT, nullptr);
  Function *Intr =
      Intrinsic::getDeclaration(&M, llvm::Intrinsic::icall_branch_funnel, {});

  auto *CI = CallInst::Create(Intr, JTArgs, "", BB);
  CI->setTailCallKind(CallInst::TCK_MustTail);
  ReturnInst::Create(M.getContext(), nullptr, BB);

  // Retarget eligible call sites at the funnel; export the resolution only if
  // any of them are visible to other modules.
  bool IsExported = false;
  applyICallBranchFunnel(SlotInfo, JT, IsExported);
  if (IsExported)
    Res->TheKind = WholeProgramDevirtResolution::BranchFunnel;
}
|
|
|
|
|
|
|
|
/// Rewrite call sites in SlotInfo to go through the branch funnel JT instead
/// of making an indirect virtual call. Each rewritten call passes the vtable
/// address as a new leading 'nest' argument. Only call sites whose caller has
/// the retpoline mitigation enabled are rewritten. Sets IsExported if any
/// call-site group is visible to other modules.
void DevirtModule::applyICallBranchFunnel(VTableSlotInfo &SlotInfo,
                                          Constant *JT, bool &IsExported) {
  auto Apply = [&](CallSiteInfo &CSInfo) {
    if (CSInfo.isExported())
      IsExported = true;
    // Nothing left to rewrite in this group.
    if (CSInfo.AllCallSitesDevirted)
      return;
    for (auto &&VCallSite : CSInfo.CallSites) {
      CallSite CS = VCallSite.CS;

      // Jump tables are only profitable if the retpoline mitigation is enabled.
      Attribute FSAttr = CS.getCaller()->getFnAttribute("target-features");
      if (FSAttr.hasAttribute(Attribute::None) ||
          !FSAttr.getValueAsString().contains("+retpoline"))
        continue;

      if (RemarksEnabled)
        VCallSite.emitRemark("branch-funnel",
                             JT->stripPointerCasts()->getName(), OREGetter);

      // Pass the address of the vtable in the nest register, which is r10 on
      // x86_64.
      // Build the rewritten signature: i8* vtable pointer first, then the
      // original parameter types.
      std::vector<Type *> NewArgs;
      NewArgs.push_back(Int8PtrTy);
      for (Type *T : CS.getFunctionType()->params())
        NewArgs.push_back(T);
      FunctionType *NewFT =
          FunctionType::get(CS.getFunctionType()->getReturnType(), NewArgs,
                            CS.getFunctionType()->isVarArg());
      PointerType *NewFTPtr = PointerType::getUnqual(NewFT);

      // Emit the replacement call/invoke at the original call site.
      IRBuilder<> IRB(CS.getInstruction());
      std::vector<Value *> Args;
      Args.push_back(IRB.CreateBitCast(VCallSite.VTable, Int8PtrTy));
      for (unsigned I = 0; I != CS.getNumArgOperands(); ++I)
        Args.push_back(CS.getArgOperand(I));

      CallSite NewCS;
      if (CS.isCall())
        NewCS = IRB.CreateCall(NewFT, IRB.CreateBitCast(JT, NewFTPtr), Args);
      else
        // Invokes keep their original normal/unwind destinations.
        NewCS = IRB.CreateInvoke(
            NewFT, IRB.CreateBitCast(JT, NewFTPtr),
            cast<InvokeInst>(CS.getInstruction())->getNormalDest(),
            cast<InvokeInst>(CS.getInstruction())->getUnwindDest(), Args);
      NewCS.setCallingConv(CS.getCallingConv());

      // Rebuild the argument attributes: 'nest' on the new first argument,
      // then the original per-argument attribute sets shifted by one. The
      // "+ 2" accounts for the function and return attribute sets in the
      // AttributeList's set count.
      AttributeList Attrs = CS.getAttributes();
      std::vector<AttributeSet> NewArgAttrs;
      NewArgAttrs.push_back(AttributeSet::get(
          M.getContext(), ArrayRef<Attribute>{Attribute::get(
                              M.getContext(), Attribute::Nest)}));
      for (unsigned I = 0; I + 2 < Attrs.getNumAttrSets(); ++I)
        NewArgAttrs.push_back(Attrs.getParamAttributes(I));
      NewCS.setAttributes(
          AttributeList::get(M.getContext(), Attrs.getFnAttributes(),
                             Attrs.getRetAttributes(), NewArgAttrs));

      CS->replaceAllUsesWith(NewCS.getInstruction());
      CS->eraseFromParent();

      // This use is no longer unsafe.
      if (VCallSite.NumUnsafeUses)
        --*VCallSite.NumUnsafeUses;
    }
    // Don't mark as devirtualized because there may be callers compiled without
    // retpoline mitigation, which would mean that they are lowered to
    // llvm.type.test and therefore require an llvm.type.test resolution for the
    // type identifier.
  };
  Apply(SlotInfo.CSInfo);
  for (auto &P : SlotInfo.ConstCSInfo)
    Apply(P.second);
}
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
bool DevirtModule::tryEvaluateFunctionsWithArgs(
|
|
|
|
MutableArrayRef<VirtualCallTarget> TargetsForSlot,
|
2017-02-16 05:56:51 +08:00
|
|
|
ArrayRef<uint64_t> Args) {
|
2016-02-10 06:50:34 +08:00
|
|
|
// Evaluate each function and store the result in each target's RetVal
|
|
|
|
// field.
|
|
|
|
for (VirtualCallTarget &Target : TargetsForSlot) {
|
|
|
|
if (Target.Fn->arg_size() != Args.size() + 1)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
Evaluator Eval(M.getDataLayout(), nullptr);
|
|
|
|
SmallVector<Constant *, 2> EvalArgs;
|
|
|
|
EvalArgs.push_back(
|
|
|
|
Constant::getNullValue(Target.Fn->getFunctionType()->getParamType(0)));
|
2017-02-16 05:56:51 +08:00
|
|
|
for (unsigned I = 0; I != Args.size(); ++I) {
|
|
|
|
auto *ArgTy = dyn_cast<IntegerType>(
|
|
|
|
Target.Fn->getFunctionType()->getParamType(I + 1));
|
|
|
|
if (!ArgTy)
|
|
|
|
return false;
|
|
|
|
EvalArgs.push_back(ConstantInt::get(ArgTy, Args[I]));
|
|
|
|
}
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
Constant *RetVal;
|
|
|
|
if (!Eval.EvaluateFunction(Target.Fn, RetVal, EvalArgs) ||
|
|
|
|
!isa<ConstantInt>(RetVal))
|
|
|
|
return false;
|
|
|
|
Target.RetVal = cast<ConstantInt>(RetVal)->getZExtValue();
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-02-16 05:56:51 +08:00
|
|
|
// Replace every call site in CSInfo with the constant TheRetVal (all targets
// of the slot were proven to return it), erasing the original calls, then
// mark the whole group as devirtualized.
void DevirtModule::applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
                                         uint64_t TheRetVal) {
  for (auto Call : CSInfo.CallSites) {
    // The replacement constant must match the call's integer result type.
    auto *RetTy = cast<IntegerType>(Call.CS.getType());
    Call.replaceAndErase("uniform-ret-val", FnName, RemarksEnabled, OREGetter,
                         ConstantInt::get(RetTy, TheRetVal));
  }
  CSInfo.markDevirt();
}
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
bool DevirtModule::tryUniformRetValOpt(
|
2017-03-04 09:34:53 +08:00
|
|
|
MutableArrayRef<VirtualCallTarget> TargetsForSlot, CallSiteInfo &CSInfo,
|
|
|
|
WholeProgramDevirtResolution::ByArg *Res) {
|
2016-02-10 06:50:34 +08:00
|
|
|
// Uniform return value optimization. If all functions return the same
|
|
|
|
// constant, replace all calls with that constant.
|
|
|
|
uint64_t TheRetVal = TargetsForSlot[0].RetVal;
|
|
|
|
for (const VirtualCallTarget &Target : TargetsForSlot)
|
|
|
|
if (Target.RetVal != TheRetVal)
|
|
|
|
return false;
|
|
|
|
|
2017-03-04 09:34:53 +08:00
|
|
|
if (CSInfo.isExported()) {
|
|
|
|
Res->TheKind = WholeProgramDevirtResolution::ByArg::UniformRetVal;
|
|
|
|
Res->Info = TheRetVal;
|
|
|
|
}
|
|
|
|
|
2017-02-16 05:56:51 +08:00
|
|
|
applyUniformRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), TheRetVal);
|
2016-08-12 03:09:02 +08:00
|
|
|
if (RemarksEnabled)
|
|
|
|
for (auto &&Target : TargetsForSlot)
|
|
|
|
Target.WasDevirt = true;
|
2016-02-10 06:50:34 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-03-11 04:09:11 +08:00
|
|
|
// Build the mangled symbol name used to export/import a value for this slot:
// "__typeid_<typeid>_<byteoffset>[_<arg>...]_<name>".
std::string DevirtModule::getGlobalName(VTableSlot Slot,
                                        ArrayRef<uint64_t> Args,
                                        StringRef Name) {
  std::string Result = "__typeid_";
  raw_string_ostream NameOS(Result);
  NameOS << cast<MDString>(Slot.TypeID)->getString() << '_' << Slot.ByteOffset;
  for (uint64_t Arg : Args)
    NameOS << '_' << Arg;
  NameOS << '_' << Name;
  return NameOS.str();
}
|
|
|
|
|
2017-09-12 06:34:42 +08:00
|
|
|
bool DevirtModule::shouldExportConstantsAsAbsoluteSymbols() {
|
|
|
|
Triple T(M.getTargetTriple());
|
|
|
|
return (T.getArch() == Triple::x86 || T.getArch() == Triple::x86_64) &&
|
|
|
|
T.getObjectFormat() == Triple::ELF;
|
|
|
|
}
|
|
|
|
|
2017-03-11 04:09:11 +08:00
|
|
|
// Export constant C under the slot's mangled name as a hidden external alias,
// so importing modules in the same LTO unit can reference it by name.
void DevirtModule::exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
                                StringRef Name, Constant *C) {
  std::string AliasName = getGlobalName(Slot, Args, Name);
  GlobalAlias *GA = GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
                                        AliasName, C, &M);
  GA->setVisibility(GlobalValue::HiddenVisibility);
}
|
|
|
|
|
2017-09-12 06:34:42 +08:00
|
|
|
// Export the 32-bit constant Const for this slot. On targets supporting
// absolute symbols it becomes an exported alias (inttoptr of the value);
// otherwise it is simply stored into Storage for in-module use.
void DevirtModule::exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
                                  StringRef Name, uint32_t Const,
                                  uint32_t &Storage) {
  if (!shouldExportConstantsAsAbsoluteSymbols()) {
    Storage = Const;
    return;
  }

  // Encode the constant as the address of an absolute symbol.
  Constant *AbsValue =
      ConstantExpr::getIntToPtr(ConstantInt::get(Int32Ty, Const), Int8PtrTy);
  exportGlobal(Slot, Args, Name, AbsValue);
}
|
|
|
|
|
2017-03-11 04:09:11 +08:00
|
|
|
// Import (get or create a declaration of) the global exported under this
// slot's mangled name, marking it hidden when it is a plain GlobalVariable.
Constant *DevirtModule::importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
                                     StringRef Name) {
  Constant *C = M.getOrInsertGlobal(getGlobalName(Slot, Args, Name), Int8Ty);
  // getOrInsertGlobal may return a bitcast expression rather than the
  // variable itself; only set visibility on a genuine GlobalVariable.
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    GV->setVisibility(GlobalValue::HiddenVisibility);
  return C;
}
|
|
|
|
|
|
|
|
// Import a constant previously exported with exportConstant. On targets not
// using absolute symbols this is just the value from Storage; otherwise the
// exported symbol's address is reinterpreted as the integer, with
// !absolute_symbol range metadata attached on first creation so the backend
// knows the symbol's value range.
Constant *DevirtModule::importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
                                       StringRef Name, IntegerType *IntTy,
                                       uint32_t Storage) {
  if (!shouldExportConstantsAsAbsoluteSymbols())
    return ConstantInt::get(IntTy, Storage);

  Constant *C = importGlobal(Slot, Args, Name);
  auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
  C = ConstantExpr::getPtrToInt(C, IntTy);

  // A global that already carries hidden visibility (and therefore absolute-
  // symbol metadata) was created by an earlier import; nothing more to do.
  if (GV->hasMetadata(LLVMContext::MD_absolute_symbol))
    return C;

  // Attach an [Lo, Hi) absolute-symbol range to the freshly created global.
  auto AttachAbsRange = [&](uint64_t Lo, uint64_t Hi) {
    auto *LoMD = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Lo));
    auto *HiMD = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Hi));
    GV->setMetadata(LLVMContext::MD_absolute_symbol,
                    MDNode::get(M.getContext(), {LoMD, HiMD}));
  };
  unsigned AbsWidth = IntTy->getBitWidth();
  if (AbsWidth == IntPtrTy->getBitWidth())
    AttachAbsRange(~0ull, ~0ull); // Full set.
  else
    AttachAbsRange(0, 1ull << AbsWidth);
  return C;
}
|
|
|
|
|
2017-02-16 05:56:51 +08:00
|
|
|
// Apply the unique-return-value optimization: each virtual call in CSInfo is
// replaced by a comparison of its vtable pointer against the address of the
// unique member (UniqueMemberAddr) whose implementation returns IsOne. The
// call's result is then the (zero-extended) result of that comparison.
void DevirtModule::applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
                                        bool IsOne,
                                        Constant *UniqueMemberAddr) {
  for (auto &&Call : CSInfo.CallSites) {
    IRBuilder<> B(Call.CS.getInstruction());
    // vtable == unique-member-address iff the callee is the one that returns
    // IsOne; pick EQ/NE so the comparison's truth matches the call's result.
    Value *Cmp =
        B.CreateICmp(IsOne ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
                     B.CreateBitCast(Call.VTable, Int8PtrTy), UniqueMemberAddr);
    // Widen the i1 comparison to the call's integer return type.
    Cmp = B.CreateZExt(Cmp, Call.CS->getType());
    // replaceAndErase substitutes Cmp for the call, emits a remark, and
    // erases the original call instruction.
    Call.replaceAndErase("unique-ret-val", FnName, RemarksEnabled, OREGetter,
                         Cmp);
  }
  CSInfo.markDevirt();
}
|
|
|
|
|
2018-03-10 03:11:44 +08:00
|
|
|
// Compute the address of type member M as an i8* constant: the address of the
// vtable global that contains it plus the member's byte offset within that
// global.
Constant *DevirtModule::getMemberAddr(const TypeMemberInfo *M) {
  Constant *VTableI8 = ConstantExpr::getBitCast(M->Bits->GV, Int8PtrTy);
  Constant *ByteOffset = ConstantInt::get(Int64Ty, M->Offset);
  return ConstantExpr::getGetElementPtr(Int8Ty, VTableI8, ByteOffset);
}
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
// Unique-return-value optimization: for an i1-returning virtual function, if
// exactly one implementation returns a given value (0 or 1), every call can
// be replaced with a pointer comparison of the vtable against that unique
// implementation's member address. Returns true if the optimization applied.
bool DevirtModule::tryUniqueRetValOpt(
    unsigned BitWidth, MutableArrayRef<VirtualCallTarget> TargetsForSlot,
    CallSiteInfo &CSInfo, WholeProgramDevirtResolution::ByArg *Res,
    VTableSlot Slot, ArrayRef<uint64_t> Args) {
  // IsOne controls whether we look for a 0 or a 1.
  auto tryUniqueRetValOptFor = [&](bool IsOne) {
    // The single target returning IsOne, or nullptr until found.
    const TypeMemberInfo *UniqueMember = nullptr;
    for (const VirtualCallTarget &Target : TargetsForSlot) {
      if (Target.RetVal == (IsOne ? 1 : 0)) {
        // A second match means the value is not unique; bail out.
        if (UniqueMember)
          return false;
        UniqueMember = Target.TM;
      }
    }

    // We should have found a unique member or bailed out by now. We already
    // checked for a uniform return value in tryUniformRetValOpt.
    assert(UniqueMember);

    Constant *UniqueMemberAddr = getMemberAddr(UniqueMember);
    if (CSInfo.isExported()) {
      // Record the resolution in the summary and export the unique member's
      // address for importing modules. Res is expected to be non-null when
      // CSInfo.isExported() — NOTE(review): not locally enforced; confirm
      // with callers.
      Res->TheKind = WholeProgramDevirtResolution::ByArg::UniqueRetVal;
      Res->Info = IsOne;

      exportGlobal(Slot, Args, "unique_member", UniqueMemberAddr);
    }

    // Replace each call with the comparison.
    applyUniqueRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), IsOne,
                         UniqueMemberAddr);

    // Update devirtualization statistics for targets.
    if (RemarksEnabled)
      for (auto &&Target : TargetsForSlot)
        Target.WasDevirt = true;

    return true;
  };

  // Only meaningful for boolean returns; try each polarity in turn.
  if (BitWidth == 1) {
    if (tryUniqueRetValOptFor(true))
      return true;
    if (tryUniqueRetValOptFor(false))
      return true;
  }
  return false;
}
|
|
|
|
|
2017-02-16 05:56:51 +08:00
|
|
|
// Apply virtual constant propagation: replace each virtual call in CSInfo
// with a load of the precomputed return value stored in the vtable at byte
// offset Byte (and, for i1 returns, at bit mask Bit within that byte).
void DevirtModule::applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
                                         Constant *Byte, Constant *Bit) {
  for (auto Call : CSInfo.CallSites) {
    auto *RetType = cast<IntegerType>(Call.CS.getType());
    IRBuilder<> B(Call.CS.getInstruction());
    // Address of the stored constant: vtable pointer + Byte offset.
    Value *Addr =
        B.CreateGEP(Int8Ty, B.CreateBitCast(Call.VTable, Int8PtrTy), Byte);
    if (RetType->getBitWidth() == 1) {
      // i1 values are packed: load the byte, mask the bit, test non-zero.
      Value *Bits = B.CreateLoad(Int8Ty, Addr);
      Value *BitsAndBit = B.CreateAnd(Bits, Bit);
      auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
      Call.replaceAndErase("virtual-const-prop-1-bit", FnName, RemarksEnabled,
                           OREGetter, IsBitSet);
    } else {
      // Wider integers are stored directly: load through a pointer of the
      // call's return type.
      Value *ValAddr = B.CreateBitCast(Addr, RetType->getPointerTo());
      Value *Val = B.CreateLoad(RetType, ValAddr);
      Call.replaceAndErase("virtual-const-prop", FnName, RemarksEnabled,
                           OREGetter, Val);
    }
  }
  CSInfo.markDevirt();
}
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
// Virtual constant propagation: if every implementation of a virtual slot is
// a readnone function whose return value can be evaluated for a given set of
// constant arguments, store the per-vtable return values inside padding
// before/after each vtable and rewrite calls as loads. Falls back through
// the uniform- and unique-ret-val optimizations per constant-argument group.
bool DevirtModule::tryVirtualConstProp(
    MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
    WholeProgramDevirtResolution *Res, VTableSlot Slot) {
  // This only works if the function returns an integer.
  auto RetType = dyn_cast<IntegerType>(TargetsForSlot[0].Fn->getReturnType());
  if (!RetType)
    return false;
  unsigned BitWidth = RetType->getBitWidth();
  if (BitWidth > 64)
    return false;

  // Make sure that each function is defined, does not access memory, takes at
  // least one argument, does not use its first argument (which we assume is
  // 'this'), and has the same return type.
  //
  // Note that we test whether this copy of the function is readnone, rather
  // than testing function attributes, which must hold for any copy of the
  // function, even a less optimized version substituted at link time. This is
  // sound because the virtual constant propagation optimizations effectively
  // inline all implementations of the virtual function into each call site,
  // rather than using function attributes to perform local optimization.
  for (VirtualCallTarget &Target : TargetsForSlot) {
    if (Target.Fn->isDeclaration() ||
        computeFunctionBodyMemoryAccess(*Target.Fn, AARGetter(*Target.Fn)) !=
            MAK_ReadNone ||
        Target.Fn->arg_empty() || !Target.Fn->arg_begin()->use_empty() ||
        Target.Fn->getReturnType() != RetType)
      return false;
  }

  // Process each group of call sites sharing the same constant arguments.
  for (auto &&CSByConstantArg : SlotInfo.ConstCSInfo) {
    if (!tryEvaluateFunctionsWithArgs(TargetsForSlot, CSByConstantArg.first))
      continue;

    WholeProgramDevirtResolution::ByArg *ResByArg = nullptr;
    if (Res)
      ResByArg = &Res->ResByArg[CSByConstantArg.first];

    // Prefer the cheaper optimizations when they apply.
    if (tryUniformRetValOpt(TargetsForSlot, CSByConstantArg.second, ResByArg))
      continue;

    if (tryUniqueRetValOpt(BitWidth, TargetsForSlot, CSByConstantArg.second,
                           ResByArg, Slot, CSByConstantArg.first))
      continue;

    // Find an allocation offset in bits in all vtables associated with the
    // type.
    uint64_t AllocBefore =
        findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth);
    uint64_t AllocAfter =
        findLowestOffset(TargetsForSlot, /*IsAfter=*/true, BitWidth);

    // Calculate the total amount of padding needed to store a value at both
    // ends of the object.
    uint64_t TotalPaddingBefore = 0, TotalPaddingAfter = 0;
    for (auto &&Target : TargetsForSlot) {
      TotalPaddingBefore += std::max<int64_t>(
          (AllocBefore + 7) / 8 - Target.allocatedBeforeBytes() - 1, 0);
      TotalPaddingAfter += std::max<int64_t>(
          (AllocAfter + 7) / 8 - Target.allocatedAfterBytes() - 1, 0);
    }

    // If the amount of padding is too large, give up.
    // FIXME: do something smarter here.
    if (std::min(TotalPaddingBefore, TotalPaddingAfter) > 128)
      continue;

    // Calculate the offset to the value as a (possibly negative) byte offset
    // and (if applicable) a bit offset, and store the values in the targets.
    int64_t OffsetByte;
    uint64_t OffsetBit;
    if (TotalPaddingBefore <= TotalPaddingAfter)
      setBeforeReturnValues(TargetsForSlot, AllocBefore, BitWidth, OffsetByte,
                            OffsetBit);
    else
      setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte,
                           OffsetBit);

    if (RemarksEnabled)
      for (auto &&Target : TargetsForSlot)
        Target.WasDevirt = true;

    // Record the byte/bit offsets in the summary (or export them as absolute
    // symbols) for importing modules.
    if (CSByConstantArg.second.isExported()) {
      ResByArg->TheKind = WholeProgramDevirtResolution::ByArg::VirtualConstProp;
      exportConstant(Slot, CSByConstantArg.first, "byte", OffsetByte,
                     ResByArg->Byte);
      exportConstant(Slot, CSByConstantArg.first, "bit", 1ULL << OffsetBit,
                     ResByArg->Bit);
    }

    // Rewrite each call to a load from OffsetByte/OffsetBit.
    Constant *ByteConst = ConstantInt::get(Int32Ty, OffsetByte);
    Constant *BitConst = ConstantInt::get(Int8Ty, 1ULL << OffsetBit);
    applyVirtualConstProp(CSByConstantArg.second,
                          TargetsForSlot[0].Fn->getName(), ByteConst, BitConst);
  }
  return true;
}
|
|
|
|
|
|
|
|
// Rebuild a vtable global to make room for constant-propagated return values
// stored before and after the original data. The original global is replaced
// by an alias into a new anonymous global laid out as
// {before bytes, original initializer, after bytes}.
void DevirtModule::rebuildGlobal(VTableBits &B) {
  // Nothing was allocated in the padding; leave the global untouched.
  if (B.Before.Bytes.empty() && B.After.Bytes.empty())
    return;

  // Align the before byte array to the global's minimum alignment so that we
  // don't break any alignment requirements on the global.
  MaybeAlign Alignment(B.GV->getAlignment());
  if (!Alignment)
    Alignment =
        Align(M.getDataLayout().getABITypeAlignment(B.GV->getValueType()));
  B.Before.Bytes.resize(alignTo(B.Before.Bytes.size(), Alignment));

  // Before was stored in reverse order; flip it now.
  for (size_t I = 0, Size = B.Before.Bytes.size(); I != Size / 2; ++I)
    std::swap(B.Before.Bytes[I], B.Before.Bytes[Size - 1 - I]);

  // Build an anonymous global containing the before bytes, followed by the
  // original initializer, followed by the after bytes.
  auto NewInit = ConstantStruct::getAnon(
      {ConstantDataArray::get(M.getContext(), B.Before.Bytes),
       B.GV->getInitializer(),
       ConstantDataArray::get(M.getContext(), B.After.Bytes)});
  auto NewGV =
      new GlobalVariable(M, NewInit->getType(), B.GV->isConstant(),
                         GlobalVariable::PrivateLinkage, NewInit, "", B.GV);
  // Preserve the original global's section, comdat and alignment so the
  // replacement behaves identically at link time.
  NewGV->setSection(B.GV->getSection());
  NewGV->setComdat(B.GV->getComdat());
  NewGV->setAlignment(MaybeAlign(B.GV->getAlignment()));

  // Copy the original vtable's metadata to the anonymous global, adjusting
  // offsets as required.
  NewGV->copyMetadata(B.GV, B.Before.Bytes.size());

  // Build an alias named after the original global, pointing at the second
  // element (the original initializer).
  auto Alias = GlobalAlias::create(
      B.GV->getInitializer()->getType(), 0, B.GV->getLinkage(), "",
      ConstantExpr::getGetElementPtr(
          NewInit->getType(), NewGV,
          ArrayRef<Constant *>{ConstantInt::get(Int32Ty, 0),
                               ConstantInt::get(Int32Ty, 1)}),
      &M);
  Alias->setVisibility(B.GV->getVisibility());
  // takeName transfers the original's name to the alias so external
  // references keep resolving to the same symbol.
  Alias->takeName(B.GV);

  B.GV->replaceAllUsesWith(Alias);
  B.GV->eraseFromParent();
}
|
|
|
|
|
2016-08-12 03:09:02 +08:00
|
|
|
bool DevirtModule::areRemarksEnabled() {
|
|
|
|
const auto &FL = M.getFunctionList();
|
2018-09-18 21:42:24 +08:00
|
|
|
for (const Function &Fn : FL) {
|
|
|
|
const auto &BBL = Fn.getBasicBlockList();
|
|
|
|
if (BBL.empty())
|
|
|
|
continue;
|
|
|
|
auto DI = OptimizationRemark(DEBUG_TYPE, "", DebugLoc(), &BBL.front());
|
|
|
|
return DI.isEnabled();
|
|
|
|
}
|
|
|
|
return false;
|
2016-08-12 03:09:02 +08:00
|
|
|
}
|
|
|
|
|
2016-06-25 08:23:04 +08:00
|
|
|
// Collect devirtualizable call sites reached through llvm.type.test uses and
// record them in CallSlots, then clean up the now-unneeded assumes/tests.
// NOTE(review): AssumeFunc is not referenced in this body — confirm whether
// it is intentionally unused here.
void DevirtModule::scanTypeTestUsers(Function *TypeTestFunc,
                                     Function *AssumeFunc) {
  // Find all virtual calls via a virtual table pointer %p under an assumption
  // of the form llvm.assume(llvm.type.test(%p, %md)). This indicates that %p
  // points to a member of the type identifier %md. Group calls by (type ID,
  // offset) pair (effectively the identity of the virtual function) and store
  // to CallSlots.
  DenseSet<CallSite> SeenCallSites;
  for (auto I = TypeTestFunc->use_begin(), E = TypeTestFunc->use_end();
       I != E;) {
    // Advance the iterator before any erasure below can invalidate it.
    auto CI = dyn_cast<CallInst>(I->getUser());
    ++I;
    if (!CI)
      continue;

    // Search for virtual calls based on %p and add them to DevirtCalls.
    SmallVector<DevirtCallSite, 1> DevirtCalls;
    SmallVector<CallInst *, 1> Assumes;
    auto &DT = LookupDomTree(*CI->getFunction());
    findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI, DT);

    // If we found any, add them to CallSlots.
    if (!Assumes.empty()) {
      Metadata *TypeId =
          cast<MetadataAsValue>(CI->getArgOperand(1))->getMetadata();
      Value *Ptr = CI->getArgOperand(0)->stripPointerCasts();
      for (DevirtCallSite Call : DevirtCalls) {
        // Only add this CallSite if we haven't seen it before. The vtable
        // pointer may have been CSE'd with pointers from other call sites,
        // and we don't want to process call sites multiple times. We can't
        // just skip the vtable Ptr if it has been seen before, however, since
        // it may be shared by type tests that dominate different calls.
        if (SeenCallSites.insert(Call.CS).second)
          CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CS, nullptr);
      }
    }

    // We no longer need the assumes or the type test.
    for (auto Assume : Assumes)
      Assume->eraseFromParent();
    // We can't use RecursivelyDeleteTriviallyDeadInstructions here because we
    // may use the vtable argument later.
    if (CI->use_empty())
      CI->eraseFromParent();
  }
}
|
|
|
|
|
|
|
|
// Lower every call to llvm.type.checked.load in the module: replace the
// combined "load function pointer + type check" intrinsic with an explicit
// load and an explicit llvm.type.test, recording each devirtualizable call
// site in CallSlots so later phases can attempt devirtualization.
void DevirtModule::scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc) {
  Function *TypeTestFunc = Intrinsic::getDeclaration(&M, Intrinsic::type_test);

  // Walk the intrinsic's use list manually; the body erases the current call,
  // so the iterator is advanced *before* the user is inspected or deleted.
  for (auto I = TypeCheckedLoadFunc->use_begin(),
            E = TypeCheckedLoadFunc->use_end();
       I != E;) {
    auto CI = dyn_cast<CallInst>(I->getUser());
    ++I;
    if (!CI)
      continue;

    // llvm.type.checked.load(i8* %ptr, i32 %offset, metadata %type_id)
    Value *Ptr = CI->getArgOperand(0);
    Value *Offset = CI->getArgOperand(1);
    Value *TypeIdValue = CI->getArgOperand(2);
    Metadata *TypeId = cast<MetadataAsValue>(TypeIdValue)->getMetadata();

    SmallVector<DevirtCallSite, 1> DevirtCalls;
    SmallVector<Instruction *, 1> LoadedPtrs;
    SmallVector<Instruction *, 1> Preds;
    bool HasNonCallUses = false;
    auto &DT = LookupDomTree(*CI->getFunction());
    findDevirtualizableCallsForTypeCheckedLoad(DevirtCalls, LoadedPtrs, Preds,
                                               HasNonCallUses, CI, DT);

    // Start by generating "pessimistic" code that explicitly loads the function
    // pointer from the vtable and performs the type check. If possible, we will
    // eliminate the load and the type check later.

    // If possible, only generate the load at the point where it is used.
    // This helps avoid unnecessary spills.
    IRBuilder<> LoadB(
        (LoadedPtrs.size() == 1 && !HasNonCallUses) ? LoadedPtrs[0] : CI);
    Value *GEP = LoadB.CreateGEP(Int8Ty, Ptr, Offset);
    Value *GEPPtr = LoadB.CreateBitCast(GEP, PointerType::getUnqual(Int8PtrTy));
    Value *LoadedValue = LoadB.CreateLoad(Int8PtrTy, GEPPtr);

    // Redirect every extractvalue of the function-pointer element to the
    // explicit load, then delete the now-dead extracts.
    for (Instruction *LoadedPtr : LoadedPtrs) {
      LoadedPtr->replaceAllUsesWith(LoadedValue);
      LoadedPtr->eraseFromParent();
    }

    // Likewise for the type test.
    IRBuilder<> CallB((Preds.size() == 1 && !HasNonCallUses) ? Preds[0] : CI);
    CallInst *TypeTestCall = CallB.CreateCall(TypeTestFunc, {Ptr, TypeIdValue});

    for (Instruction *Pred : Preds) {
      Pred->replaceAllUsesWith(TypeTestCall);
      Pred->eraseFromParent();
    }

    // We have already erased any extractvalue instructions that refer to the
    // intrinsic call, but the intrinsic may have other non-extractvalue uses
    // (although this is unlikely). In that case, explicitly build a pair and
    // RAUW it.
    if (!CI->use_empty()) {
      Value *Pair = UndefValue::get(CI->getType());
      IRBuilder<> B(CI);
      Pair = B.CreateInsertValue(Pair, LoadedValue, {0});
      Pair = B.CreateInsertValue(Pair, TypeTestCall, {1});
      CI->replaceAllUsesWith(Pair);
    }

    // The number of unsafe uses is initially the number of uses.
    auto &NumUnsafeUses = NumUnsafeUsesForTypeTest[TypeTestCall];
    NumUnsafeUses = DevirtCalls.size();

    // If the function pointer has a non-call user, we cannot eliminate the type
    // check, as one of those users may eventually call the pointer. Increment
    // the unsafe use count to make sure it cannot reach zero.
    if (HasNonCallUses)
      ++NumUnsafeUses;
    // Register each devirtualizable call site, sharing a pointer to the unsafe
    // use count so successful devirtualization can decrement it later.
    for (DevirtCallSite Call : DevirtCalls) {
      CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CS,
                                                   &NumUnsafeUses);
    }

    CI->eraseFromParent();
  }
}
|
|
|
|
|
2017-03-09 08:21:25 +08:00
|
|
|
// ThinLTO import phase: apply a devirtualization resolution that was computed
// during the thin link (and stored in ImportSummary) to the call sites in
// SlotInfo for the given vtable slot. Bails out silently when no resolution
// exists for this (type id, byte offset) pair.
void DevirtModule::importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo) {
  // Summaries key type ids by string; non-string (local) type ids cannot have
  // been resolved across modules.
  auto *TypeId = dyn_cast<MDString>(Slot.TypeID);
  if (!TypeId)
    return;
  const TypeIdSummary *TidSummary =
      ImportSummary->getTypeIdSummary(TypeId->getString());
  if (!TidSummary)
    return;
  auto ResI = TidSummary->WPDRes.find(Slot.ByteOffset);
  if (ResI == TidSummary->WPDRes.end())
    return;
  const WholeProgramDevirtResolution &Res = ResI->second;

  if (Res.TheKind == WholeProgramDevirtResolution::SingleImpl) {
    assert(!Res.SingleImplName.empty());
    // The type of the function in the declaration is irrelevant because every
    // call site will cast it to the correct type.
    Constant *SingleImpl =
        cast<Constant>(M.getOrInsertFunction(Res.SingleImplName,
                                             Type::getVoidTy(M.getContext()))
                           .getCallee());

    // This is the import phase so we should not be exporting anything.
    bool IsExported = false;
    applySingleImplDevirt(SlotInfo, SingleImpl, IsExported);
    assert(!IsExported);
  }

  // Apply any per-constant-argument resolutions (virtual constant
  // propagation and friends) to the matching grouped call sites.
  for (auto &CSByConstantArg : SlotInfo.ConstCSInfo) {
    auto I = Res.ResByArg.find(CSByConstantArg.first);
    if (I == Res.ResByArg.end())
      continue;
    auto &ResByArg = I->second;
    // FIXME: We should figure out what to do about the "function name" argument
    // to the apply* functions, as the function names are unavailable during the
    // importing phase. For now we just pass the empty string. This does not
    // impact correctness because the function names are just used for remarks.
    switch (ResByArg.TheKind) {
    case WholeProgramDevirtResolution::ByArg::UniformRetVal:
      applyUniformRetValOpt(CSByConstantArg.second, "", ResByArg.Info);
      break;
    case WholeProgramDevirtResolution::ByArg::UniqueRetVal: {
      // The unique member's address lives in the exporting module; import a
      // reference to it by its well-known symbol name.
      Constant *UniqueMemberAddr =
          importGlobal(Slot, CSByConstantArg.first, "unique_member");
      applyUniqueRetValOpt(CSByConstantArg.second, "", ResByArg.Info,
                           UniqueMemberAddr);
      break;
    }
    case WholeProgramDevirtResolution::ByArg::VirtualConstProp: {
      // Byte offset and bit mask are emitted as absolute-symbol constants by
      // the exporting module; importConstant materializes them here.
      Constant *Byte = importConstant(Slot, CSByConstantArg.first, "byte",
                                      Int32Ty, ResByArg.Byte);
      Constant *Bit = importConstant(Slot, CSByConstantArg.first, "bit", Int8Ty,
                                     ResByArg.Bit);
      applyVirtualConstProp(CSByConstantArg.second, "", Byte, Bit);
      break;
    }
    default:
      break;
    }
  }

  if (Res.TheKind == WholeProgramDevirtResolution::BranchFunnel) {
    // The type of the function is irrelevant, because it's bitcast at calls
    // anyhow.
    Constant *JT = cast<Constant>(
        M.getOrInsertFunction(getGlobalName(Slot, {}, "branch_funnel"),
                              Type::getVoidTy(M.getContext()))
            .getCallee());
    bool IsExported = false;
    applyICallBranchFunnel(SlotInfo, JT, IsExported);
    assert(!IsExported);
  }
}
|
|
|
|
|
|
|
|
void DevirtModule::removeRedundantTypeTests() {
|
|
|
|
auto True = ConstantInt::getTrue(M.getContext());
|
|
|
|
for (auto &&U : NumUnsafeUsesForTypeTest) {
|
|
|
|
if (U.second == 0) {
|
|
|
|
U.first->replaceAllUsesWith(True);
|
|
|
|
U.first->eraseFromParent();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-25 08:23:04 +08:00
|
|
|
// Main driver for module-level whole-program devirtualization. Returns true
// if the module was (or may have been) modified. Phases, in order:
//  1. Bail out on partially split LTO units.
//  2. Scan llvm.type.test/llvm.assume and llvm.type.checked.load users to
//     populate CallSlots.
//  3. ThinLTO import: apply thin-link resolutions and finish early.
//  4. Otherwise (export / regular LTO): build the type-id map, merge in
//     summary call-site info, attempt per-slot devirtualizations, emit
//     remarks, drop redundant type tests, and rebuild touched vtables.
bool DevirtModule::run() {
  // If only some of the modules were split, we cannot correctly perform
  // this transformation. We already checked for the presense of type tests
  // with partially split modules during the thin link, and would have emitted
  // an error if any were found, so here we can simply return.
  if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
      (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
    return false;

  Function *TypeTestFunc =
      M.getFunction(Intrinsic::getName(Intrinsic::type_test));
  Function *TypeCheckedLoadFunc =
      M.getFunction(Intrinsic::getName(Intrinsic::type_checked_load));
  Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));

  // Normally if there are no users of the devirtualization intrinsics in the
  // module, this pass has nothing to do. But if we are exporting, we also need
  // to handle any users that appear only in the function summaries.
  if (!ExportSummary &&
      (!TypeTestFunc || TypeTestFunc->use_empty() || !AssumeFunc ||
       AssumeFunc->use_empty()) &&
      (!TypeCheckedLoadFunc || TypeCheckedLoadFunc->use_empty()))
    return false;

  if (TypeTestFunc && AssumeFunc)
    scanTypeTestUsers(TypeTestFunc, AssumeFunc);

  if (TypeCheckedLoadFunc)
    scanTypeCheckedLoadUsers(TypeCheckedLoadFunc);

  if (ImportSummary) {
    // ThinLTO import phase: apply the resolutions computed by the thin link.
    for (auto &S : CallSlots)
      importResolution(S.first, S.second);

    removeRedundantTypeTests();

    // The rest of the code is only necessary when exporting or during regular
    // LTO, so we are done.
    return true;
  }

  // Rebuild type metadata into a map for easy lookup.
  std::vector<VTableBits> Bits;
  DenseMap<Metadata *, std::set<TypeMemberInfo>> TypeIdMap;
  buildTypeIdentifierMap(Bits, TypeIdMap);
  if (TypeIdMap.empty())
    return true;

  // Collect information from summary about which calls to try to devirtualize.
  if (ExportSummary) {
    // Summaries reference type ids by GUID; map GUIDs back to the string
    // type-id metadata present in this module.
    DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
    for (auto &P : TypeIdMap) {
      if (auto *TypeId = dyn_cast<MDString>(P.first))
        MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
            TypeId);
    }

    for (auto &P : *ExportSummary) {
      for (auto &S : P.second.SummaryList) {
        auto *FS = dyn_cast<FunctionSummary>(S.get());
        if (!FS)
          continue;
        // FIXME: Only add live functions.
        for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) {
          for (Metadata *MD : MetadataByGUID[VF.GUID]) {
            CallSlots[{MD, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS);
          }
        }
        for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) {
          for (Metadata *MD : MetadataByGUID[VF.GUID]) {
            CallSlots[{MD, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS);
          }
        }
        for (const FunctionSummary::ConstVCall &VC :
             FS->type_test_assume_const_vcalls()) {
          for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) {
            CallSlots[{MD, VC.VFunc.Offset}]
                .ConstCSInfo[VC.Args]
                .addSummaryTypeTestAssumeUser(FS);
          }
        }
        for (const FunctionSummary::ConstVCall &VC :
             FS->type_checked_load_const_vcalls()) {
          for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) {
            CallSlots[{MD, VC.VFunc.Offset}]
                .ConstCSInfo[VC.Args]
                .addSummaryTypeCheckedLoadUser(FS);
          }
        }
      }
    }
  }

  // For each (type, offset) pair:
  bool DidVirtualConstProp = false;
  std::map<std::string, Function*> DevirtTargets;
  for (auto &S : CallSlots) {
    // Search each of the members of the type identifier for the virtual
    // function implementation at offset S.first.ByteOffset, and add to
    // TargetsForSlot.
    std::vector<VirtualCallTarget> TargetsForSlot;
    if (tryFindVirtualCallTargets(TargetsForSlot, TypeIdMap[S.first.TypeID],
                                  S.first.ByteOffset)) {
      WholeProgramDevirtResolution *Res = nullptr;
      if (ExportSummary && isa<MDString>(S.first.TypeID))
        Res = &ExportSummary
                   ->getOrInsertTypeIdSummary(
                       cast<MDString>(S.first.TypeID)->getString())
                   .WPDRes[S.first.ByteOffset];

      // Prefer single-implementation devirtualization; fall back to virtual
      // constant propagation and branch funnels only when it fails.
      if (!trySingleImplDevirt(ExportSummary, TargetsForSlot, S.second, Res)) {
        DidVirtualConstProp |=
            tryVirtualConstProp(TargetsForSlot, S.second, Res, S.first);

        tryICallBranchFunnel(TargetsForSlot, S.second, Res, S.first);
      }

      // Collect functions devirtualized at least for one call site for stats.
      if (RemarksEnabled)
        for (const auto &T : TargetsForSlot)
          if (T.WasDevirt)
            DevirtTargets[T.Fn->getName()] = T.Fn;
    }

    // CFI-specific: if we are exporting and any llvm.type.checked.load
    // intrinsics were *not* devirtualized, we need to add the resulting
    // llvm.type.test intrinsics to the function summaries so that the
    // LowerTypeTests pass will export them.
    if (ExportSummary && isa<MDString>(S.first.TypeID)) {
      auto GUID =
          GlobalValue::getGUID(cast<MDString>(S.first.TypeID)->getString());
      for (auto FS : S.second.CSInfo.SummaryTypeCheckedLoadUsers)
        FS->addTypeTest(GUID);
      for (auto &CCS : S.second.ConstCSInfo)
        for (auto FS : CCS.second.SummaryTypeCheckedLoadUsers)
          FS->addTypeTest(GUID);
    }
  }

  if (RemarksEnabled) {
    // Generate remarks for each devirtualized function.
    for (const auto &DT : DevirtTargets) {
      Function *F = DT.second;

      using namespace ore;
      OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, "Devirtualized", F)
                        << "devirtualized "
                        << NV("FunctionName", DT.first));
    }
  }

  removeRedundantTypeTests();

  // Rebuild each global we touched as part of virtual constant propagation to
  // include the before and after bytes.
  if (DidVirtualConstProp)
    for (VTableBits &B : Bits)
      rebuildGlobal(B);

  // We have lowered or deleted the type checked load intrinsics, so we no
  // longer have enough information to reason about the liveness of virtual
  // function pointers in GlobalDCE.
  for (GlobalVariable &GV : M.globals())
    GV.eraseMetadata(LLVMContext::MD_vcall_visibility);

  return true;
}
|
2019-08-02 21:10:52 +08:00
|
|
|
|
|
|
|
// Summary-index-only devirtualization, run during the ThinLTO thin link.
// Works purely from the combined summary (no IR is available): collects
// virtual call sites recorded in function summaries, attempts
// single-implementation devirtualization per vtable slot, and records the
// resolutions in the index for the import phase to apply.
void DevirtIndex::run() {
  if (ExportSummary.typeIdCompatibleVtableMap().empty())
    return;

  // Type ids in summaries are referenced by GUID; build a reverse map back
  // to the type-id name strings. Multiple names can hash to one GUID.
  DenseMap<GlobalValue::GUID, std::vector<StringRef>> NameByGUID;
  for (auto &P : ExportSummary.typeIdCompatibleVtableMap()) {
    NameByGUID[GlobalValue::getGUID(P.first)].push_back(P.first);
  }

  // Collect information from summary about which calls to try to devirtualize.
  for (auto &P : ExportSummary) {
    for (auto &S : P.second.SummaryList) {
      auto *FS = dyn_cast<FunctionSummary>(S.get());
      if (!FS)
        continue;
      // FIXME: Only add live functions.
      for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) {
        for (StringRef Name : NameByGUID[VF.GUID]) {
          CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS);
        }
      }
      for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) {
        for (StringRef Name : NameByGUID[VF.GUID]) {
          CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS);
        }
      }
      for (const FunctionSummary::ConstVCall &VC :
           FS->type_test_assume_const_vcalls()) {
        for (StringRef Name : NameByGUID[VC.VFunc.GUID]) {
          CallSlots[{Name, VC.VFunc.Offset}]
              .ConstCSInfo[VC.Args]
              .addSummaryTypeTestAssumeUser(FS);
        }
      }
      for (const FunctionSummary::ConstVCall &VC :
           FS->type_checked_load_const_vcalls()) {
        for (StringRef Name : NameByGUID[VC.VFunc.GUID]) {
          CallSlots[{Name, VC.VFunc.Offset}]
              .ConstCSInfo[VC.Args]
              .addSummaryTypeCheckedLoadUser(FS);
        }
      }
    }
  }

  std::set<ValueInfo> DevirtTargets;
  // For each (type, offset) pair:
  for (auto &S : CallSlots) {
    // Search each of the members of the type identifier for the virtual
    // function implementation at offset S.first.ByteOffset, and add to
    // TargetsForSlot.
    std::vector<ValueInfo> TargetsForSlot;
    auto TidSummary = ExportSummary.getTypeIdCompatibleVtableSummary(S.first.TypeID);
    // Every slot key came from typeIdCompatibleVtableMap above, so a summary
    // must exist for it.
    assert(TidSummary);
    if (tryFindVirtualCallTargets(TargetsForSlot, *TidSummary,
                                  S.first.ByteOffset)) {
      WholeProgramDevirtResolution *Res =
          &ExportSummary.getOrInsertTypeIdSummary(S.first.TypeID)
               .WPDRes[S.first.ByteOffset];

      // Only single-implementation devirtualization is supported at the
      // index level; other strategies require IR.
      if (!trySingleImplDevirt(TargetsForSlot, S.first, S.second, Res,
                               DevirtTargets))
        continue;
    }
  }

  // Optionally have the thin link print message for each devirtualized
  // function.
  if (PrintSummaryDevirt)
    for (const auto &DT : DevirtTargets)
      errs() << "Devirtualized call to " << DT << "\n";

  return;
}
|