2012-02-25 15:20:06 +08:00
|
|
|
//===-- Verifier.cpp - Implement the Module Verifier ----------------------===//
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2003-10-21 03:43:21 +08:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2003-10-21 03:43:21 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2001-06-07 04:29:01 +08:00
|
|
|
//
|
2002-03-30 03:06:18 +08:00
|
|
|
// This file defines the function verifier interface, that can be used for some
|
2001-06-07 04:29:01 +08:00
|
|
|
// sanity checking of input to the system.
|
|
|
|
//
|
2004-06-25 05:47:35 +08:00
|
|
|
// Note that this does not provide full `Java style' security and verifications,
|
|
|
|
// instead it just tries to ensure that code is well-formed.
|
2001-06-07 04:29:01 +08:00
|
|
|
//
|
2004-06-25 05:47:35 +08:00
|
|
|
// * Both of a binary operator's parameters are of the same type
|
2002-04-25 03:12:21 +08:00
|
|
|
// * Verify that the indices of mem access instructions match other operands
|
2004-06-25 05:47:35 +08:00
|
|
|
// * Verify that arithmetic and other things are only performed on first-class
|
2002-08-03 01:37:08 +08:00
|
|
|
// types. Verify that shifts & logicals only happen on integrals f.e.
|
2004-06-25 05:47:35 +08:00
|
|
|
// * All of the constants in a switch statement are of the correct type
|
2002-08-03 01:37:08 +08:00
|
|
|
// * The code is in valid SSA form
|
2004-06-25 05:47:35 +08:00
|
|
|
// * It should be illegal to put a label into any other type (like a structure)
|
2001-06-07 04:29:01 +08:00
|
|
|
// or to return one. [except constant arrays!]
|
2008-03-28 14:46:51 +08:00
|
|
|
// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
|
2002-02-21 01:55:43 +08:00
|
|
|
// * PHI nodes must have an entry for each predecessor, with no extras.
|
2002-06-25 23:56:27 +08:00
|
|
|
// * PHI nodes must be the first thing in a basic block, all grouped together
|
2002-10-07 05:00:31 +08:00
|
|
|
// * PHI nodes must have at least one entry
|
2002-06-25 23:56:27 +08:00
|
|
|
// * All basic blocks should only end with terminator insts, not contain them
|
2002-03-30 03:06:18 +08:00
|
|
|
// * The entry node to a function must not have predecessors
|
2003-10-11 01:54:14 +08:00
|
|
|
// * All Instructions must be embedded into a basic block
|
2004-06-25 05:47:35 +08:00
|
|
|
// * Functions cannot take a void-typed parameter
|
2002-04-14 06:48:46 +08:00
|
|
|
// * Verify that a function's argument list agrees with its declared type.
|
2002-03-16 04:25:09 +08:00
|
|
|
// * It is illegal to specify a name for a void value.
|
2003-10-11 01:54:14 +08:00
|
|
|
// * It is illegal to have an internal global value with no initializer
|
2002-04-13 02:20:49 +08:00
|
|
|
// * It is illegal to have a ret instruction that returns a value that does not
|
|
|
|
// agree with the function return value type.
|
2002-05-09 03:49:50 +08:00
|
|
|
// * Function call argument types match the function prototype
|
2011-08-13 04:24:12 +08:00
|
|
|
// * A landing pad is defined by a landingpad instruction, and can be jumped to
|
|
|
|
// only by the unwind edge of an invoke instruction.
|
|
|
|
// * A landingpad instruction must be the first non-PHI instruction in the
|
|
|
|
// block.
|
2015-10-22 04:33:31 +08:00
|
|
|
// * Landingpad instructions must be in a function with a personality function.
|
2002-04-25 03:12:21 +08:00
|
|
|
// * All other things that are tested by asserts spread about the code...
|
2001-06-07 04:29:01 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-01-13 17:26:24 +08:00
|
|
|
#include "llvm/IR/Verifier.h"
|
2016-01-10 12:31:05 +08:00
|
|
|
#include "llvm/ADT/MapVector.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SetVector.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2014-03-04 19:45:46 +08:00
|
|
|
#include "llvm/IR/CFG.h"
|
2014-03-04 19:01:28 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/CallingConv.h"
|
2014-03-04 20:24:34 +08:00
|
|
|
#include "llvm/IR/ConstantRange.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
2013-08-01 01:49:08 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2014-03-06 08:46:21 +08:00
|
|
|
#include "llvm/IR/DebugInfo.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
2014-01-13 17:26:24 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/InlineAsm.h"
|
2014-04-16 00:27:38 +08:00
|
|
|
#include "llvm/IR/InstIterator.h"
|
2014-03-06 11:23:41 +08:00
|
|
|
#include "llvm/IR/InstVisitor.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/LLVMContext.h"
|
|
|
|
#include "llvm/IR/Metadata.h"
|
|
|
|
#include "llvm/IR/Module.h"
|
2016-04-21 01:27:44 +08:00
|
|
|
#include "llvm/IR/ModuleSlotTracker.h"
|
2014-01-20 19:34:08 +08:00
|
|
|
#include "llvm/IR/PassManager.h"
|
2014-12-04 03:53:15 +08:00
|
|
|
#include "llvm/IR/Statepoint.h"
|
2008-03-13 01:45:29 +08:00
|
|
|
#include "llvm/Pass.h"
|
2013-07-19 08:31:03 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-09 02:01:40 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2009-03-01 05:05:51 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2002-02-21 01:55:43 +08:00
|
|
|
#include <algorithm>
|
2006-03-31 15:22:05 +08:00
|
|
|
#include <cstdarg>
|
2003-11-22 04:23:48 +08:00
|
|
|
using namespace llvm;
|
2003-11-12 06:41:34 +08:00
|
|
|
|
2015-03-18 01:28:41 +08:00
|
|
|
// Command-line flag "-verify-debug-info" (default: true). Presumably gates
// the debug-info checks run alongside IR verification -- confirm at use site.
static cl::opt<bool> VerifyDebugInfo("verify-debug-info", cl::init(true));
|
2013-07-19 08:31:03 +08:00
|
|
|
|
2008-05-13 08:00:25 +08:00
|
|
|
namespace {
|
2014-04-16 00:27:32 +08:00
|
|
|
struct VerifierSupport {
|
2016-04-21 00:17:37 +08:00
|
|
|
raw_ostream *OS;
|
2016-04-20 23:55:24 +08:00
|
|
|
const Module *M = nullptr;
|
2016-04-21 01:27:44 +08:00
|
|
|
Optional<ModuleSlotTracker> MST;
|
2014-04-16 00:27:32 +08:00
|
|
|
|
2016-04-20 23:55:24 +08:00
|
|
|
/// Track the brokenness of the module while recursively visiting.
|
|
|
|
bool Broken = false;
|
2014-04-16 00:27:32 +08:00
|
|
|
|
2016-04-21 00:17:37 +08:00
|
|
|
explicit VerifierSupport(raw_ostream *OS) : OS(OS) {}
|
2014-04-16 00:27:32 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
private:
|
2015-10-09 07:49:46 +08:00
|
|
|
template <class NodeTy> void Write(const ilist_iterator<NodeTy> &I) {
|
|
|
|
Write(&*I);
|
|
|
|
}
|
|
|
|
|
2015-12-02 03:06:36 +08:00
|
|
|
void Write(const Module *M) {
|
|
|
|
if (!M)
|
|
|
|
return;
|
2016-04-21 00:17:37 +08:00
|
|
|
*OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
|
2015-12-02 03:06:36 +08:00
|
|
|
}
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
void Write(const Value *V) {
|
2014-04-16 00:27:32 +08:00
|
|
|
if (!V)
|
|
|
|
return;
|
|
|
|
if (isa<Instruction>(V)) {
|
2016-04-21 01:27:44 +08:00
|
|
|
V->print(*OS, *MST);
|
|
|
|
*OS << '\n';
|
2014-04-16 00:27:32 +08:00
|
|
|
} else {
|
2016-04-21 01:27:44 +08:00
|
|
|
V->printAsOperand(*OS, true, *MST);
|
2016-04-21 00:17:37 +08:00
|
|
|
*OS << '\n';
|
2014-04-16 00:27:32 +08:00
|
|
|
}
|
|
|
|
}
|
2015-06-27 06:04:34 +08:00
|
|
|
void Write(ImmutableCallSite CS) {
|
|
|
|
Write(CS.getInstruction());
|
2015-06-27 05:39:44 +08:00
|
|
|
}
|
2014-04-16 00:27:32 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
void Write(const Metadata *MD) {
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
if (!MD)
|
|
|
|
return;
|
2016-04-21 01:27:44 +08:00
|
|
|
MD->print(*OS, *MST, M);
|
2016-04-21 00:17:37 +08:00
|
|
|
*OS << '\n';
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
}
|
|
|
|
|
2015-04-08 00:50:39 +08:00
|
|
|
template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
|
|
|
|
Write(MD.get());
|
|
|
|
}
|
|
|
|
|
2015-03-25 01:18:03 +08:00
|
|
|
void Write(const NamedMDNode *NMD) {
|
|
|
|
if (!NMD)
|
|
|
|
return;
|
2016-04-21 01:27:44 +08:00
|
|
|
NMD->print(*OS, *MST);
|
2016-04-21 00:17:37 +08:00
|
|
|
*OS << '\n';
|
2015-03-25 01:18:03 +08:00
|
|
|
}
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
void Write(Type *T) {
|
2014-04-16 00:27:32 +08:00
|
|
|
if (!T)
|
|
|
|
return;
|
2016-04-21 00:17:37 +08:00
|
|
|
*OS << ' ' << *T;
|
2014-04-16 00:27:32 +08:00
|
|
|
}
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
void Write(const Comdat *C) {
|
2014-06-28 02:19:56 +08:00
|
|
|
if (!C)
|
|
|
|
return;
|
2016-04-21 00:17:37 +08:00
|
|
|
*OS << *C;
|
2014-06-28 02:19:56 +08:00
|
|
|
}
|
|
|
|
|
2016-01-10 12:31:05 +08:00
|
|
|
template <typename T> void Write(ArrayRef<T> Vs) {
|
|
|
|
for (const T &V : Vs)
|
|
|
|
Write(V);
|
|
|
|
}
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
template <typename T1, typename... Ts>
|
|
|
|
void WriteTs(const T1 &V1, const Ts &... Vs) {
|
|
|
|
Write(V1);
|
|
|
|
WriteTs(Vs...);
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
}
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
template <typename... Ts> void WriteTs() {}
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
public:
|
2015-03-17 01:49:03 +08:00
|
|
|
/// \brief A check failed, so printout out the condition and the message.
|
|
|
|
///
|
|
|
|
/// This provides a nice place to put a breakpoint if you want to see why
|
|
|
|
/// something is not correct.
|
2015-03-15 00:47:37 +08:00
|
|
|
void CheckFailed(const Twine &Message) {
|
2016-04-21 00:17:37 +08:00
|
|
|
if (OS)
|
|
|
|
*OS << Message << '\n';
|
2015-03-31 10:37:13 +08:00
|
|
|
Broken = true;
|
2014-06-28 02:19:56 +08:00
|
|
|
}
|
2015-03-15 00:47:37 +08:00
|
|
|
|
2015-03-17 01:49:03 +08:00
|
|
|
/// \brief A check failed (with values to print).
|
|
|
|
///
|
|
|
|
/// This calls the Message-only version so that the above is easier to set a
|
|
|
|
/// breakpoint on.
|
2015-03-15 00:47:37 +08:00
|
|
|
template <typename T1, typename... Ts>
|
|
|
|
void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
|
|
|
|
CheckFailed(Message);
|
2016-04-21 00:17:37 +08:00
|
|
|
if (OS)
|
|
|
|
WriteTs(V1, Vs...);
|
2015-03-15 00:47:37 +08:00
|
|
|
}
|
2014-04-16 00:27:32 +08:00
|
|
|
};
|
2015-03-08 05:15:40 +08:00
|
|
|
|
2014-04-16 00:27:32 +08:00
|
|
|
class Verifier : public InstVisitor<Verifier>, VerifierSupport {
|
|
|
|
friend class InstVisitor<Verifier>;
|
|
|
|
|
2014-01-17 19:09:34 +08:00
|
|
|
LLVMContext *Context;
|
|
|
|
DominatorTree DT;
|
|
|
|
|
|
|
|
/// \brief When verifying a basic block, keep track of all of the
|
|
|
|
/// instructions we have seen so far.
|
|
|
|
///
|
|
|
|
/// This allows us to do efficient dominance checks for the case when an
|
|
|
|
/// instruction has an operand that is an instruction in the same block.
|
|
|
|
SmallPtrSet<Instruction *, 16> InstsInThisBlock;
|
|
|
|
|
|
|
|
/// \brief Keep track of the metadata nodes that have been checked already.
|
2015-02-10 05:30:05 +08:00
|
|
|
SmallPtrSet<const Metadata *, 32> MDNodes;
|
2014-01-17 19:09:34 +08:00
|
|
|
|
2016-03-29 05:06:26 +08:00
|
|
|
/// Track all DICompileUnits visited.
|
|
|
|
SmallPtrSet<const Metadata *, 2> CUVisited;
|
|
|
|
|
2015-08-01 01:58:14 +08:00
|
|
|
/// \brief The result type for a landingpad.
|
|
|
|
Type *LandingPadResultTy;
|
|
|
|
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
/// \brief Whether we've seen a call to @llvm.localescape in this function
|
2015-01-13 08:48:10 +08:00
|
|
|
/// already.
|
2015-03-06 02:26:34 +08:00
|
|
|
bool SawFrameEscape;
|
|
|
|
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
/// Stores the count of how many objects were passed to llvm.localescape for a
|
|
|
|
/// given function and the largest index passed to llvm.localrecover.
|
2015-03-06 02:26:34 +08:00
|
|
|
DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
|
2015-01-13 08:48:10 +08:00
|
|
|
|
2016-01-10 12:31:05 +08:00
|
|
|
// Maps catchswitches and cleanuppads that unwind to siblings to the
|
|
|
|
// terminators that indicate the unwind, used to detect cycles therein.
|
|
|
|
MapVector<Instruction *, TerminatorInst *> SiblingFuncletInfo;
|
|
|
|
|
2015-12-11 01:56:06 +08:00
|
|
|
/// Cache of constants visited in search of ConstantExprs.
|
|
|
|
SmallPtrSet<const Constant *, 32> ConstantExprVisited;
|
|
|
|
|
2016-01-20 16:41:22 +08:00
|
|
|
// Verify that this GlobalValue is only used in this module.
|
|
|
|
// This map is used to avoid visiting uses twice. We can arrive at a user
|
|
|
|
// twice, if they have multiple operands. In particular for very large
|
|
|
|
// constant expressions, we can arrive at a particular user many times.
|
|
|
|
SmallPtrSet<const Value *, 32> GlobalValueVisited;
|
|
|
|
|
2015-12-18 06:09:19 +08:00
|
|
|
void checkAtomicMemAccessSize(const Module *M, Type *Ty,
|
|
|
|
const Instruction *I);
|
2016-04-21 01:27:44 +08:00
|
|
|
|
|
|
|
/// Point the verifier's diagnostic state at \p NewM.
///
/// Rebuilds the cached slot tracker (MST — presumably a ModuleSlotTracker;
/// confirm against the member declaration) only when the module actually
/// changes, since constructing one is not free.
void updateModule(const Module *NewM) {
  if (M != NewM) {
    MST.emplace(NewM);
    M = NewM;
  }
}
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
public:
|
2016-04-21 00:17:37 +08:00
|
|
|
/// Construct a verifier that reports broken IR to \p OS.
/// \p OS may be null to suppress printing; failures are still recorded via
/// the Broken flag in VerifierSupport. The module/context members are bound
/// lazily on the first call to verify().
explicit Verifier(raw_ostream *OS)
    : VerifierSupport(OS), Context(nullptr), LandingPadResultTy(nullptr),
      SawFrameEscape(false) {}
|
2001-06-07 04:29:01 +08:00
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
/// Verify a single function \p F.
/// Returns true if the function is well-formed, false otherwise (printing
/// diagnostics to OS when it is non-null). Pre-checks that F has an entry
/// block and that every block ends in a terminator before building dominance
/// info, since DominatorTree construction requires a structurally sound CFG.
bool verify(const Function &F) {
  updateModule(F.getParent());
  Context = &M->getContext();

  // First ensure the function is well-enough formed to compute dominance
  // information.
  if (F.empty()) {
    if (OS)
      *OS << "Function '" << F.getName()
          << "' does not contain an entry block!\n";
    return false;
  }
  for (const BasicBlock &BB : F) {
    // A block is acceptable here iff it is non-empty and ends in a terminator.
    if (!BB.empty() && BB.back().isTerminator())
      continue;

    if (OS) {
      *OS << "Basic Block in function '" << F.getName()
          << "' does not have terminator!\n";
      BB.printAsOperand(*OS, true, *MST);
      *OS << "\n";
    }
    // Bail out before DT.recalculate: dominance cannot be computed on a
    // CFG with terminator-less blocks.
    return false;
  }

  // Now directly compute a dominance tree. We don't rely on the pass
  // manager to provide this as it isolates us from a potentially
  // out-of-date dominator tree and makes it significantly more complex to
  // run this code outside of a pass manager.
  // FIXME: It's really gross that we have to cast away constness here.
  DT.recalculate(const_cast<Function &>(F));

  Broken = false;
  // FIXME: We strip const here because the inst visitor strips const.
  visit(const_cast<Function &>(F));
  verifySiblingFuncletUnwinds();
  // Reset per-function state so the verifier object can be reused for the
  // next function.
  InstsInThisBlock.clear();
  LandingPadResultTy = nullptr;
  SawFrameEscape = false;
  SiblingFuncletInfo.clear();

  return !Broken;
}
|
2004-06-03 14:38:43 +08:00
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
/// Verify module-level properties of \p M.
/// Returns true if the module-level IR is well-formed, false otherwise.
/// Note: this only runs the global/metadata/comdat checks and prototype
/// checks for declarations; function bodies are verified separately via
/// verify(const Function &).
bool verify(const Module &M) {
  updateModule(&M);
  Context = &M.getContext();
  Broken = false;

  // Scan through, checking all of the external function's linkage now...
  for (const Function &F : M) {
    visitGlobalValue(F);

    // Check to make sure function prototypes are okay.
    // (Definitions get the full visitFunction treatment during
    // per-function verification.)
    if (F.isDeclaration())
      visitFunction(F);
  }

  // Now that we've visited every function, verify that we never asked to
  // recover a frame index that wasn't escaped.
  verifyFrameRecoverIndices();
  for (const GlobalVariable &GV : M.globals())
    visitGlobalVariable(GV);

  for (const GlobalAlias &GA : M.aliases())
    visitGlobalAlias(GA);

  for (const NamedMDNode &NMD : M.named_metadata())
    visitNamedMDNode(NMD);

  for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
    visitComdat(SMEC.getValue());

  visitModuleFlags(M);
  visitModuleIdents(M);

  verifyCompileUnits();

  return !Broken;
}
|
2003-11-22 01:35:51 +08:00
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
private:
|
2014-01-17 19:09:34 +08:00
|
|
|
// Verification methods...
|
2014-01-19 10:22:18 +08:00
|
|
|
void visitGlobalValue(const GlobalValue &GV);
|
|
|
|
void visitGlobalVariable(const GlobalVariable &GV);
|
|
|
|
void visitGlobalAlias(const GlobalAlias &GA);
|
2014-06-03 10:41:57 +08:00
|
|
|
void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
|
2014-08-21 13:55:13 +08:00
|
|
|
void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
|
2014-06-03 10:41:57 +08:00
|
|
|
const GlobalAlias &A, const Constant &C);
|
2014-01-19 10:22:18 +08:00
|
|
|
void visitNamedMDNode(const NamedMDNode &NMD);
|
2015-02-10 05:30:05 +08:00
|
|
|
void visitMDNode(const MDNode &MD);
|
|
|
|
void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
|
|
|
|
void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
|
2014-06-28 02:19:56 +08:00
|
|
|
void visitComdat(const Comdat &C);
|
2014-01-19 10:22:18 +08:00
|
|
|
void visitModuleIdents(const Module &M);
|
|
|
|
void visitModuleFlags(const Module &M);
|
|
|
|
void visitModuleFlag(const MDNode *Op,
|
|
|
|
DenseMap<const MDString *, const MDNode *> &SeenIDs,
|
|
|
|
SmallVectorImpl<const MDNode *> &Requirements);
|
|
|
|
void visitFunction(const Function &F);
|
2014-01-17 19:09:34 +08:00
|
|
|
void visitBasicBlock(BasicBlock &BB);
|
2014-10-21 07:52:07 +08:00
|
|
|
void visitRangeMetadata(Instruction& I, MDNode* Range, Type* Ty);
|
2015-10-10 01:41:29 +08:00
|
|
|
void visitDereferenceableMetadata(Instruction& I, MDNode* MD);
|
2014-10-21 07:52:07 +08:00
|
|
|
|
2015-03-31 00:19:15 +08:00
|
|
|
template <class Ty> bool isValidMetadataArray(const MDTuple &N);
|
2015-02-10 09:09:50 +08:00
|
|
|
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
|
|
|
|
#include "llvm/IR/Metadata.def"
|
2015-04-30 00:38:44 +08:00
|
|
|
void visitDIScope(const DIScope &N);
|
|
|
|
void visitDIVariable(const DIVariable &N);
|
|
|
|
void visitDILexicalBlockBase(const DILexicalBlockBase &N);
|
|
|
|
void visitDITemplateParameter(const DITemplateParameter &N);
|
2014-01-17 19:09:34 +08:00
|
|
|
|
2015-04-07 01:04:58 +08:00
|
|
|
void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
// InstVisitor overrides...
|
|
|
|
using InstVisitor<Verifier>::visit;
|
2014-01-17 19:09:34 +08:00
|
|
|
void visit(Instruction &I);
|
|
|
|
|
|
|
|
void visitTruncInst(TruncInst &I);
|
|
|
|
void visitZExtInst(ZExtInst &I);
|
|
|
|
void visitSExtInst(SExtInst &I);
|
|
|
|
void visitFPTruncInst(FPTruncInst &I);
|
|
|
|
void visitFPExtInst(FPExtInst &I);
|
|
|
|
void visitFPToUIInst(FPToUIInst &I);
|
|
|
|
void visitFPToSIInst(FPToSIInst &I);
|
|
|
|
void visitUIToFPInst(UIToFPInst &I);
|
|
|
|
void visitSIToFPInst(SIToFPInst &I);
|
|
|
|
void visitIntToPtrInst(IntToPtrInst &I);
|
|
|
|
void visitPtrToIntInst(PtrToIntInst &I);
|
|
|
|
void visitBitCastInst(BitCastInst &I);
|
|
|
|
void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
|
|
|
|
void visitPHINode(PHINode &PN);
|
|
|
|
void visitBinaryOperator(BinaryOperator &B);
|
|
|
|
void visitICmpInst(ICmpInst &IC);
|
|
|
|
void visitFCmpInst(FCmpInst &FC);
|
|
|
|
void visitExtractElementInst(ExtractElementInst &EI);
|
|
|
|
void visitInsertElementInst(InsertElementInst &EI);
|
|
|
|
void visitShuffleVectorInst(ShuffleVectorInst &EI);
|
|
|
|
void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
|
|
|
|
void visitCallInst(CallInst &CI);
|
|
|
|
void visitInvokeInst(InvokeInst &II);
|
|
|
|
void visitGetElementPtrInst(GetElementPtrInst &GEP);
|
|
|
|
void visitLoadInst(LoadInst &LI);
|
|
|
|
void visitStoreInst(StoreInst &SI);
|
|
|
|
void verifyDominatesUse(Instruction &I, unsigned i);
|
|
|
|
void visitInstruction(Instruction &I);
|
|
|
|
void visitTerminatorInst(TerminatorInst &I);
|
|
|
|
void visitBranchInst(BranchInst &BI);
|
|
|
|
void visitReturnInst(ReturnInst &RI);
|
|
|
|
void visitSwitchInst(SwitchInst &SI);
|
|
|
|
void visitIndirectBrInst(IndirectBrInst &BI);
|
|
|
|
void visitSelectInst(SelectInst &SI);
|
|
|
|
void visitUserOp1(Instruction &I);
|
|
|
|
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
|
2015-06-27 06:21:52 +08:00
|
|
|
void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
|
2015-03-15 09:21:30 +08:00
|
|
|
template <class DbgIntrinsicTy>
|
|
|
|
void visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII);
|
2014-01-17 19:09:34 +08:00
|
|
|
void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
|
|
|
|
void visitAtomicRMWInst(AtomicRMWInst &RMWI);
|
|
|
|
void visitFenceInst(FenceInst &FI);
|
|
|
|
void visitAllocaInst(AllocaInst &AI);
|
|
|
|
void visitExtractValueInst(ExtractValueInst &EVI);
|
|
|
|
void visitInsertValueInst(InsertValueInst &IVI);
|
2015-08-11 10:48:30 +08:00
|
|
|
void visitEHPadPredecessors(Instruction &I);
|
2014-01-17 19:09:34 +08:00
|
|
|
void visitLandingPadInst(LandingPadInst &LPI);
|
2015-08-01 01:58:14 +08:00
|
|
|
void visitCatchPadInst(CatchPadInst &CPI);
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticeable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of its output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
void visitCatchReturnInst(CatchReturnInst &CatchReturn);
|
2015-08-01 01:58:14 +08:00
|
|
|
void visitCleanupPadInst(CleanupPadInst &CPI);
|
2016-01-10 12:30:02 +08:00
|
|
|
void visitFuncletPadInst(FuncletPadInst &FPI);
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticeable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of its output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
|
2015-08-01 01:58:14 +08:00
|
|
|
void visitCleanupReturnInst(CleanupReturnInst &CRI);
|
2014-01-17 19:09:34 +08:00
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
void verifyCallSite(CallSite CS);
|
2016-04-02 05:41:15 +08:00
|
|
|
void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
|
|
|
|
void verifySwiftErrorValue(const Value *SwiftErrorVal);
|
2014-04-25 04:14:34 +08:00
|
|
|
void verifyMustTailCall(CallInst &CI);
|
2016-02-01 00:32:23 +08:00
|
|
|
bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
|
2014-01-17 19:09:34 +08:00
|
|
|
unsigned ArgNo, std::string &Suffix);
|
2016-02-01 00:32:23 +08:00
|
|
|
bool verifyIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
|
2014-01-17 19:09:34 +08:00
|
|
|
SmallVectorImpl<Type *> &ArgTys);
|
2016-02-01 00:32:23 +08:00
|
|
|
bool verifyIntrinsicIsVarArg(bool isVarArg,
|
2014-01-17 19:09:34 +08:00
|
|
|
ArrayRef<Intrinsic::IITDescriptor> &Infos);
|
2016-02-01 00:32:23 +08:00
|
|
|
bool verifyAttributeCount(AttributeSet Attrs, unsigned Params);
|
|
|
|
void verifyAttributeTypes(AttributeSet Attrs, unsigned Idx, bool isFunction,
|
2014-01-17 19:09:34 +08:00
|
|
|
const Value *V);
|
2016-02-01 00:32:23 +08:00
|
|
|
void verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
|
2014-01-17 19:09:34 +08:00
|
|
|
bool isReturnValue, const Value *V);
|
2016-02-01 00:32:23 +08:00
|
|
|
void verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
|
2014-01-17 19:09:34 +08:00
|
|
|
const Value *V);
|
2016-02-01 00:32:23 +08:00
|
|
|
void verifyFunctionMetadata(
|
2015-05-13 23:13:45 +08:00
|
|
|
const SmallVector<std::pair<unsigned, MDNode *>, 4> MDs);
|
2014-01-17 19:09:34 +08:00
|
|
|
|
2015-12-11 01:56:06 +08:00
|
|
|
void visitConstantExprsRecursively(const Constant *EntryC);
|
|
|
|
void visitConstantExpr(const ConstantExpr *CE);
|
2016-02-01 00:32:23 +08:00
|
|
|
void verifyStatepoint(ImmutableCallSite CS);
|
2015-03-06 02:26:34 +08:00
|
|
|
void verifyFrameRecoverIndices();
|
2016-01-10 12:31:05 +08:00
|
|
|
void verifySiblingFuncletUnwinds();
|
2014-04-16 00:27:38 +08:00
|
|
|
|
2016-04-24 05:08:00 +08:00
|
|
|
void verifyBitPieceExpression(const DbgInfoIntrinsic &I);
|
|
|
|
|
2016-03-29 05:06:26 +08:00
|
|
|
/// Module-level debug info verification...
|
|
|
|
void verifyCompileUnits();
|
2014-01-17 19:09:34 +08:00
|
|
|
};
|
2003-11-22 04:23:48 +08:00
|
|
|
} // End anonymous namespace
|
|
|
|
|
2002-04-19 04:37:37 +08:00
|
|
|
// Assert - We know that cond should be true, if not print an error message.
// NOTE: unlike a real assert, a failure records the problem via CheckFailed
// and then *returns from the enclosing visit method*, so later checks in
// that method are skipped but verification as a whole continues.
#define Assert(C, ...) \
  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (0)
|
2002-02-21 01:55:43 +08:00
|
|
|
|
2008-08-28 12:02:44 +08:00
|
|
|
// Entry point for per-instruction verification: reject null operands up
// front (a null operand would crash the specific visitors), then dispatch
// to the opcode-specific visit method via InstVisitor.
void Verifier::visit(Instruction &I) {
  unsigned NumOps = I.getNumOperands();
  for (unsigned Idx = 0; Idx != NumOps; ++Idx)
    Assert(I.getOperand(Idx) != nullptr, "Operand is null", &I);
  InstVisitor<Verifier>::visit(I);
}
|
|
|
|
|
2016-01-15 06:20:56 +08:00
|
|
|
// Helper to recursively iterate over indirect users. By
|
|
|
|
// returning false, the callback can ask to stop recursing
|
|
|
|
// further.
|
|
|
|
static void forEachUser(const Value *User,
|
|
|
|
SmallPtrSet<const Value *, 32> &Visited,
|
|
|
|
llvm::function_ref<bool(const Value *)> Callback) {
|
|
|
|
if (!Visited.insert(User).second)
|
|
|
|
return;
|
2016-01-16 03:00:20 +08:00
|
|
|
for (const Value *TheNextUser : User->materialized_users())
|
2016-01-15 06:20:56 +08:00
|
|
|
if (Callback(TheNextUser))
|
|
|
|
forEachUser(TheNextUser, Visited, Callback);
|
|
|
|
}
|
2008-08-28 12:02:44 +08:00
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
/// Check properties common to every GlobalValue (functions, globals,
/// aliases): linkage/declaration consistency, alignment limits, appending
/// linkage restrictions, comdat rules, and that all users of the value live
/// in this module.
void Verifier::visitGlobalValue(const GlobalValue &GV) {
  Assert(!GV.isDeclaration() || GV.hasExternalLinkage() ||
             GV.hasExternalWeakLinkage(),
         "Global is external, but doesn't have external or weak linkage!", &GV);

  Assert(GV.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &GV);
  Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
         "Only global variables can have appending linkage!", &GV);

  if (GV.hasAppendingLinkage()) {
    // Appending linkage concatenates arrays at link time, so the value type
    // must be an array.
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
    Assert(GVar && GVar->getValueType()->isArrayTy(),
           "Only global arrays can have appending linkage!", GVar);
  }

  if (GV.isDeclarationForLinker())
    Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);

  // Verify that this GlobalValue is only used in this module.
  // GlobalValueVisited is shared across calls so users reachable from
  // multiple globals are only walked once. Returning false from the
  // callback stops recursion below instructions/functions, since their
  // own users are irrelevant to cross-module checks.
  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
    if (const Instruction *I = dyn_cast<Instruction>(V)) {
      if (!I->getParent() || !I->getParent()->getParent())
        CheckFailed("Global is referenced by parentless instruction!", &GV,
                    M, I);
      else if (I->getParent()->getParent()->getParent() != M)
        CheckFailed("Global is referenced in a different module!", &GV,
                    M, I, I->getParent()->getParent(),
                    I->getParent()->getParent()->getParent());
      return false;
    } else if (const Function *F = dyn_cast<Function>(V)) {
      if (F->getParent() != M)
        CheckFailed("Global is used by function in a different module", &GV,
                    M, F, F->getParent());
      return false;
    }
    return true;
  });
}
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
/// Check GlobalVariable-specific invariants: initializer/type agreement,
/// 'common' linkage rules, the expected shapes of the well-known intrinsic
/// globals (llvm.global_ctors/dtors, llvm.used/llvm.compiler.used), and
/// dllimport restrictions, then recurse into the initializer's constant
/// expressions and the shared GlobalValue checks.
void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
  if (GV.hasInitializer()) {
    Assert(GV.getInitializer()->getType() == GV.getValueType(),
           "Global variable initializer type does not match global "
           "variable type!",
           &GV);

    // If the global has common linkage, it must have a zero initializer and
    // cannot be constant.
    if (GV.hasCommonLinkage()) {
      Assert(GV.getInitializer()->isNullValue(),
             "'common' global must have a zero initializer!", &GV);
      Assert(!GV.isConstant(), "'common' global may not be marked constant!",
             &GV);
      Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
    }
  } else {
    Assert(GV.hasExternalLinkage() || GV.hasExternalWeakLinkage(),
           "invalid linkage type for global declaration", &GV);
  }

  // llvm.global_ctors / llvm.global_dtors must be appending arrays of
  // { i32 priority, void()* fn [, i8* associated-data] } structs.
  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
                       GV.getName() == "llvm.global_dtors")) {
    Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
           "invalid linkage for intrinsic global variable", &GV);
    // Don't worry about emitting an error for it not being an array,
    // visitGlobalValue will complain on appending non-array.
    if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
      PointerType *FuncPtrTy =
          FunctionType::get(Type::getVoidTy(*Context), false)->getPointerTo();
      // FIXME: Reject the 2-field form in LLVM 4.0.
      Assert(STy &&
                 (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
                 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
                 STy->getTypeAtIndex(1) == FuncPtrTy,
             "wrong type for intrinsic global variable", &GV);
      if (STy->getNumElements() == 3) {
        // Optional third field: i8* associated data.
        Type *ETy = STy->getTypeAtIndex(2);
        Assert(ETy->isPointerTy() &&
                   cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
               "wrong type for intrinsic global variable", &GV);
      }
    }
  }

  // llvm.used / llvm.compiler.used must be an appending array of pointers
  // to named globals, functions, or aliases (pointer casts are looked
  // through, but aliases are not followed).
  if (GV.hasName() && (GV.getName() == "llvm.used" ||
                       GV.getName() == "llvm.compiler.used")) {
    Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
           "invalid linkage for intrinsic global variable", &GV);
    Type *GVType = GV.getValueType();
    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
      Assert(PTy, "wrong type for intrinsic global variable", &GV);
      if (GV.hasInitializer()) {
        const Constant *Init = GV.getInitializer();
        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
        // NOTE(review): "initalizer" is a typo, but this diagnostic string
        // is runtime output likely pinned by regression tests — fix it only
        // together with the corresponding test updates.
        Assert(InitArray, "wrong initalizer for intrinsic global variable",
               Init);
        for (Value *Op : InitArray->operands()) {
          Value *V = Op->stripPointerCastsNoFollowAliases();
          Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
                     isa<GlobalAlias>(V),
                 "invalid llvm.used member", V);
          Assert(V->hasName(), "members of llvm.used must be named", V);
        }
      }
    }
  }

  Assert(!GV.hasDLLImportStorageClass() ||
             (GV.isDeclaration() && GV.hasExternalLinkage()) ||
             GV.hasAvailableExternallyLinkage(),
         "Global is marked as dllimport, but not external", &GV);

  if (!GV.hasInitializer()) {
    visitGlobalValue(GV);
    return;
  }

  // Walk any aggregate initializers looking for bitcasts between address spaces
  visitConstantExprsRecursively(GV.getInitializer());

  visitGlobalValue(GV);
}
|
|
|
|
|
2014-06-03 10:41:57 +08:00
|
|
|
void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
|
|
|
|
SmallPtrSet<const GlobalAlias*, 4> Visited;
|
|
|
|
Visited.insert(&GA);
|
|
|
|
visitAliaseeSubExpr(Visited, GA, C);
|
|
|
|
}
|
|
|
|
|
2014-08-21 13:55:13 +08:00
|
|
|
/// Recursive worker for aliasee verification.
/// Checks that every GlobalValue reached through \p GA's aliasee expression
/// is a definition, that alias chains contain no cycles (\p Visited) and no
/// interposable aliases, and verifies any ConstantExprs along the way.
void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
                                   const GlobalAlias &GA, const Constant &C) {
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
    Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
           &GA);

    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
      // insert() returning false means we've seen this alias before on the
      // current chain — a cycle.
      Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);

      Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
             &GA);
    } else {
      // Only continue verifying subexpressions of GlobalAliases.
      // Do not recurse into global initializers.
      return;
    }
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
    visitConstantExprsRecursively(CE);

  // Recurse through the operands: follow aliases into their aliasees and
  // descend into any other constant subexpressions.
  for (const Use &U : C.operands()) {
    Value *V = &*U;
    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
    else if (const auto *C2 = dyn_cast<Constant>(V))
      visitAliaseeSubExpr(Visited, GA, *C2);
  }
}
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
/// Check GlobalAlias invariants: permitted linkage, a non-null aliasee whose
/// type matches the alias, and an aliasee that is a GlobalValue or
/// ConstantExpr; then recursively verify the aliasee expression and the
/// shared GlobalValue properties.
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
  Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
         "Alias should have private, internal, linkonce, weak, linkonce_odr, "
         "weak_odr, or external linkage!",
         &GA);
  const Constant *Aliasee = GA.getAliasee();
  Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
  Assert(GA.getType() == Aliasee->getType(),
         "Alias and aliasee types should match!", &GA);

  Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
         "Aliasee should be either GlobalValue or ConstantExpr", &GA);

  visitAliaseeSubExpr(GA, *Aliasee);

  visitGlobalValue(GA);
}
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
/// Check the operands of a named metadata node.
/// For !llvm.dbg.cu every operand must be a DICompileUnit (a null operand
/// fails that Assert, which returns from this method entirely). For other
/// named metadata, null operands are simply skipped; non-null operands are
/// verified via visitMDNode.
void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
  for (const MDNode *MD : NMD.operands()) {
    if (NMD.getName() == "llvm.dbg.cu") {
      Assert(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
    }

    if (!MD)
      continue;

    visitMDNode(*MD);
  }
}
|
|
|
|
|
2015-02-10 05:30:05 +08:00
|
|
|
void Verifier::visitMDNode(const MDNode &MD) {
  // Visit each node at most once.  Metadata graphs may be mutually recursive,
  // so the visited set both prevents infinite recursion and memoizes work.
  if (!MDNodes.insert(&MD).second)
    return;

  // Dispatch to the subclass-specific verifier, if any.  The specialized
  // leaf kinds are enumerated by the X-macro in Metadata.def.
  switch (MD.getMetadataID()) {
  default:
    llvm_unreachable("Invalid MDNode subclass");
  case Metadata::MDTupleKind:
    break;
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  case Metadata::CLASS##Kind:                                                  \
    visit##CLASS(cast<CLASS>(MD));                                             \
    break;
#include "llvm/IR/Metadata.def"
  }

  // Recurse into operands.  Null operands are permitted; function-local
  // metadata is not reachable from global metadata.
  for (const Metadata *Operand : MD.operands()) {
    if (!Operand)
      continue;
    Assert(!isa<LocalAsMetadata>(Operand),
           "Invalid operand for global metadata!", &MD, Operand);
    if (auto *OperandN = dyn_cast<MDNode>(Operand))
      visitMDNode(*OperandN);
    else if (auto *OperandV = dyn_cast<ValueAsMetadata>(Operand))
      visitValueAsMetadata(*OperandV, nullptr);
  }

  // Check these last, so we diagnose problems in operands first.
  Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
  Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
}
|
|
|
|
|
2015-02-10 05:30:05 +08:00
|
|
|
void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(MD.getValue(), "Expected valid value", &MD);
|
|
|
|
Assert(!MD.getValue()->getType()->isMetadataTy(),
|
|
|
|
"Unexpected metadata round-trip through values", &MD, MD.getValue());
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
and I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
|
|
|
|
auto *L = dyn_cast<LocalAsMetadata>(&MD);
|
|
|
|
if (!L)
|
|
|
|
return;
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(F, "function-local metadata used outside a function", L);
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
|
|
|
|
// If this was an instruction, bb, or argument, verify that it is in the
|
|
|
|
// function that we expect.
|
|
|
|
Function *ActualF = nullptr;
|
|
|
|
if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(I->getParent(), "function-local metadata not in basic block", L, I);
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
ActualF = I->getParent()->getParent();
|
|
|
|
} else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
|
|
|
|
ActualF = BB->getParent();
|
|
|
|
else if (Argument *A = dyn_cast<Argument>(L->getValue()))
|
|
|
|
ActualF = A->getParent();
|
|
|
|
assert(ActualF && "Unimplemented function local metadata case!");
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(ActualF == F, "function-local metadata used in wrong function", L);
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
}
|
|
|
|
|
2015-02-10 05:30:05 +08:00
|
|
|
void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
Metadata *MD = MDV.getMetadata();
|
|
|
|
if (auto *N = dyn_cast<MDNode>(MD)) {
|
|
|
|
visitMDNode(*N);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Only visit each node once. Metadata can be mutually recursive, so this
|
|
|
|
// avoids infinite recursion here, as well as being an optimization.
|
|
|
|
if (!MDNodes.insert(MD).second)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (auto *V = dyn_cast<ValueAsMetadata>(MD))
|
|
|
|
visitValueAsMetadata(*V, F);
|
2010-04-30 00:10:30 +08:00
|
|
|
}
|
|
|
|
|
2016-04-24 05:08:00 +08:00
|
|
|
// A null operand or a DIType both count as a valid type reference.
static bool isType(const Metadata *MD) {
  if (!MD)
    return true;
  return isa<DIType>(MD);
}
|
|
|
|
// A null operand or a DIScope both count as a valid scope reference.
static bool isScope(const Metadata *MD) {
  if (!MD)
    return true;
  return isa<DIScope>(MD);
}
|
|
|
|
// A null operand or a DINode both count as a valid debug-node reference.
static bool isDINode(const Metadata *MD) {
  if (!MD)
    return true;
  return isa<DINode>(MD);
}
|
2015-03-31 01:21:38 +08:00
|
|
|
|
2015-03-31 00:19:15 +08:00
|
|
|
template <class Ty>
|
|
|
|
bool isValidMetadataArrayImpl(const MDTuple &N, bool AllowNull) {
|
|
|
|
for (Metadata *MD : N.operands()) {
|
|
|
|
if (MD) {
|
|
|
|
if (!isa<Ty>(MD))
|
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
if (!AllowNull)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
template <class Ty>
|
|
|
|
bool isValidMetadataArray(const MDTuple &N) {
|
|
|
|
return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ false);
|
|
|
|
}
|
|
|
|
|
|
|
|
template <class Ty>
|
|
|
|
bool isValidMetadataNullArray(const MDTuple &N) {
|
|
|
|
return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ true);
|
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDILocation(const DILocation &N) {
|
|
|
|
Assert(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
|
2015-03-27 06:05:04 +08:00
|
|
|
"location requires a valid scope", &N, N.getRawScope());
|
|
|
|
if (auto *IA = N.getRawInlinedAt())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
|
2015-02-10 09:32:56 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitGenericDINode(const GenericDINode &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag(), "invalid tag", &N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIScope(const DIScope &N) {
|
2015-03-28 07:05:04 +08:00
|
|
|
if (auto *F = N.getRawFile())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DIFile>(F), "invalid file", &N, F);
|
2015-03-28 07:05:04 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDISubrange(const DISubrange &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
|
2015-03-28 07:05:04 +08:00
|
|
|
Assert(N.getCount() >= -1, "invalid subrange count", &N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIEnumerator(const DIEnumerator &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIBasicType(const DIBasicType &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_base_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_unspecified_type,
|
|
|
|
"invalid tag", &N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-07-25 04:16:36 +08:00
|
|
|
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
|
2015-03-28 07:05:04 +08:00
|
|
|
// Common scope checks.
|
2015-04-30 00:38:44 +08:00
|
|
|
visitDIScope(N);
|
2015-03-28 07:05:04 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_typedef ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_pointer_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_reference_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_const_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_volatile_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_restrict_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_member ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_inheritance ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_friend,
|
|
|
|
"invalid tag", &N);
|
2015-03-31 08:47:15 +08:00
|
|
|
if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
|
|
|
|
N.getRawExtraData());
|
2015-03-31 08:47:15 +08:00
|
|
|
}
|
2015-07-25 04:16:36 +08:00
|
|
|
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
|
|
|
|
Assert(isType(N.getRawBaseType()), "invalid base type", &N,
|
|
|
|
N.getRawBaseType());
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-03-31 09:28:58 +08:00
|
|
|
static bool hasConflictingReferenceFlags(unsigned Flags) {
|
2015-04-30 00:38:44 +08:00
|
|
|
return (Flags & DINode::FlagLValueReference) &&
|
|
|
|
(Flags & DINode::FlagRValueReference);
|
2015-03-31 09:28:58 +08:00
|
|
|
}
|
|
|
|
|
2015-04-07 01:04:58 +08:00
|
|
|
void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
|
|
|
|
auto *Params = dyn_cast<MDTuple>(&RawParams);
|
|
|
|
Assert(Params, "invalid template params", &N, &RawParams);
|
|
|
|
for (Metadata *Op : Params->operands()) {
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(Op && isa<DITemplateParameter>(Op), "invalid template parameter", &N,
|
2015-04-07 01:04:58 +08:00
|
|
|
Params, Op);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDICompositeType(const DICompositeType &N) {
|
2015-07-25 04:16:36 +08:00
|
|
|
// Common scope checks.
|
|
|
|
visitDIScope(N);
|
2015-03-28 07:05:04 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_array_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_structure_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_union_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_enumeration_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_class_type,
|
|
|
|
"invalid tag", &N);
|
2015-03-28 07:05:04 +08:00
|
|
|
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
|
|
|
|
Assert(isType(N.getRawBaseType()), "invalid base type", &N,
|
|
|
|
N.getRawBaseType());
|
2015-07-25 04:16:36 +08:00
|
|
|
|
2015-03-28 07:05:04 +08:00
|
|
|
Assert(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
|
|
|
|
"invalid composite elements", &N, N.getRawElements());
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
|
2015-03-28 07:05:04 +08:00
|
|
|
N.getRawVTableHolder());
|
2015-03-31 09:28:58 +08:00
|
|
|
Assert(!hasConflictingReferenceFlags(N.getFlags()), "invalid reference flags",
|
|
|
|
&N);
|
2015-04-07 01:04:58 +08:00
|
|
|
if (auto *Params = N.getRawTemplateParams())
|
|
|
|
visitTemplateParams(N, *Params);
|
2015-07-25 03:57:19 +08:00
|
|
|
|
|
|
|
if (N.getTag() == dwarf::DW_TAG_class_type ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_union_type) {
|
|
|
|
Assert(N.getFile() && !N.getFile()->getFilename().empty(),
|
|
|
|
"class/union requires a filename", &N, N.getFile());
|
|
|
|
}
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDISubroutineType(const DISubroutineType &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
|
2015-03-28 10:43:53 +08:00
|
|
|
if (auto *Types = N.getRawTypeArray()) {
|
|
|
|
Assert(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
|
|
|
|
for (Metadata *Ty : N.getTypeArray()->operands()) {
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
|
2015-03-28 10:43:53 +08:00
|
|
|
}
|
2015-03-28 07:05:04 +08:00
|
|
|
}
|
2015-03-31 09:28:58 +08:00
|
|
|
Assert(!hasConflictingReferenceFlags(N.getFlags()), "invalid reference flags",
|
|
|
|
&N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIFile(const DIFile &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDICompileUnit(const DICompileUnit &N) {
|
2015-08-04 01:26:41 +08:00
|
|
|
Assert(N.isDistinct(), "compile units must be distinct", &N);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
|
2015-03-28 07:05:04 +08:00
|
|
|
|
2015-03-31 08:47:15 +08:00
|
|
|
// Don't bother verifying the compilation directory or producer string
|
|
|
|
// as those could be empty.
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
|
|
|
|
N.getRawFile());
|
2015-03-31 08:47:15 +08:00
|
|
|
Assert(!N.getFile()->getFilename().empty(), "invalid filename", &N,
|
|
|
|
N.getFile());
|
|
|
|
|
2016-04-01 07:56:58 +08:00
|
|
|
Assert((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
|
|
|
|
"invalid emission kind", &N);
|
|
|
|
|
2015-03-28 07:05:04 +08:00
|
|
|
if (auto *Array = N.getRawEnumTypes()) {
|
|
|
|
Assert(isa<MDTuple>(Array), "invalid enum list", &N, Array);
|
|
|
|
for (Metadata *Op : N.getEnumTypes()->operands()) {
|
2015-04-30 00:38:44 +08:00
|
|
|
auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
|
2015-03-28 07:05:04 +08:00
|
|
|
Assert(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
|
|
|
|
"invalid enum type", &N, N.getEnumTypes(), Op);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (auto *Array = N.getRawRetainedTypes()) {
|
|
|
|
Assert(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
|
|
|
|
for (Metadata *Op : N.getRetainedTypes()->operands()) {
|
2016-04-15 23:57:41 +08:00
|
|
|
Assert(Op && (isa<DIType>(Op) ||
|
|
|
|
(isa<DISubprogram>(Op) &&
|
|
|
|
cast<DISubprogram>(Op)->isDefinition() == false)),
|
|
|
|
"invalid retained type", &N, Op);
|
2015-03-28 07:05:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (auto *Array = N.getRawGlobalVariables()) {
|
|
|
|
Assert(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
|
|
|
|
for (Metadata *Op : N.getGlobalVariables()->operands()) {
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(Op && isa<DIGlobalVariable>(Op), "invalid global variable ref", &N,
|
2015-03-28 07:05:04 +08:00
|
|
|
Op);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (auto *Array = N.getRawImportedEntities()) {
|
|
|
|
Assert(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
|
|
|
|
for (Metadata *Op : N.getImportedEntities()->operands()) {
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref", &N,
|
2015-03-28 07:05:04 +08:00
|
|
|
Op);
|
|
|
|
}
|
|
|
|
}
|
2015-12-10 20:56:35 +08:00
|
|
|
if (auto *Array = N.getRawMacros()) {
|
|
|
|
Assert(isa<MDTuple>(Array), "invalid macro list", &N, Array);
|
|
|
|
for (Metadata *Op : N.getMacros()->operands()) {
|
|
|
|
Assert(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
|
|
|
|
}
|
|
|
|
}
|
2016-03-29 05:06:26 +08:00
|
|
|
CUVisited.insert(&N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDISubprogram(const DISubprogram &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
|
2016-04-07 02:46:39 +08:00
|
|
|
if (auto *F = N.getRawFile())
|
|
|
|
Assert(isa<DIFile>(F), "invalid file", &N, F);
|
2015-03-31 00:19:15 +08:00
|
|
|
if (auto *T = N.getRawType())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isType(N.getRawContainingType()), "invalid containing type", &N,
|
2015-03-31 00:19:15 +08:00
|
|
|
N.getRawContainingType());
|
2015-04-07 01:04:58 +08:00
|
|
|
if (auto *Params = N.getRawTemplateParams())
|
|
|
|
visitTemplateParams(N, *Params);
|
2016-04-15 23:57:41 +08:00
|
|
|
if (auto *S = N.getRawDeclaration())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
|
2015-03-31 00:19:15 +08:00
|
|
|
"invalid subprogram declaration", &N, S);
|
2015-04-07 01:04:58 +08:00
|
|
|
if (auto *RawVars = N.getRawVariables()) {
|
|
|
|
auto *Vars = dyn_cast<MDTuple>(RawVars);
|
|
|
|
Assert(Vars, "invalid variable list", &N, RawVars);
|
2015-03-31 00:19:15 +08:00
|
|
|
for (Metadata *Op : Vars->operands()) {
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(Op && isa<DILocalVariable>(Op), "invalid local variable", &N, Vars,
|
2015-03-31 00:19:15 +08:00
|
|
|
Op);
|
|
|
|
}
|
|
|
|
}
|
2015-03-31 09:28:58 +08:00
|
|
|
Assert(!hasConflictingReferenceFlags(N.getFlags()), "invalid reference flags",
|
|
|
|
&N);
|
2015-03-31 10:09:55 +08:00
|
|
|
|
2016-04-15 23:57:41 +08:00
|
|
|
auto *Unit = N.getRawUnit();
|
|
|
|
if (N.isDefinition()) {
|
|
|
|
// Subprogram definitions (not part of the type hierarchy).
|
2015-08-29 04:26:49 +08:00
|
|
|
Assert(N.isDistinct(), "subprogram definitions must be distinct", &N);
|
2016-04-15 23:57:41 +08:00
|
|
|
Assert(Unit, "subprogram definitions must have a compile unit", &N);
|
|
|
|
Assert(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
|
|
|
|
} else {
|
|
|
|
// Subprogram declarations (part of the type hierarchy).
|
|
|
|
Assert(!Unit, "subprogram declarations must not have a compile unit", &N);
|
|
|
|
}
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
|
2015-03-31 00:37:48 +08:00
|
|
|
"invalid local scope", &N, N.getRawScope());
|
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
|
|
|
|
visitDILexicalBlockBase(N);
|
2015-03-31 00:37:48 +08:00
|
|
|
|
|
|
|
Assert(N.getLine() || !N.getColumn(),
|
|
|
|
"cannot have column info without line info", &N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
/// Verify a DILexicalBlockFile: nothing beyond the shared base checks.
void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
  visitDILexicalBlockBase(N);
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDINamespace(const DINamespace &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
|
2015-03-31 01:21:38 +08:00
|
|
|
if (auto *S = N.getRawScope())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DIScope>(S), "invalid scope ref", &N, S);
|
2015-03-31 01:21:38 +08:00
|
|
|
}
|
|
|
|
|
2015-12-10 20:56:35 +08:00
|
|
|
void Verifier::visitDIMacro(const DIMacro &N) {
|
|
|
|
Assert(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
|
|
|
|
N.getMacinfoType() == dwarf::DW_MACINFO_undef,
|
|
|
|
"invalid macinfo type", &N);
|
|
|
|
Assert(!N.getName().empty(), "anonymous macro", &N);
|
2016-01-07 22:28:20 +08:00
|
|
|
if (!N.getValue().empty()) {
|
|
|
|
assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
|
|
|
|
}
|
2015-12-10 20:56:35 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void Verifier::visitDIMacroFile(const DIMacroFile &N) {
|
|
|
|
Assert(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
|
|
|
|
"invalid macinfo type", &N);
|
|
|
|
if (auto *F = N.getRawFile())
|
|
|
|
Assert(isa<DIFile>(F), "invalid file", &N, F);
|
|
|
|
|
|
|
|
if (auto *Array = N.getRawElements()) {
|
|
|
|
Assert(isa<MDTuple>(Array), "invalid macro list", &N, Array);
|
|
|
|
for (Metadata *Op : N.getElements()->operands()) {
|
|
|
|
Assert(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-30 07:03:47 +08:00
|
|
|
/// Verify a DIModule: correct tag and a non-empty name.
void Verifier::visitDIModule(const DIModule &N) {
  Assert(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
  Assert(!N.getName().empty(), "anonymous module", &N);
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
|
|
|
|
visitDITemplateParameter(N);
|
2015-03-31 01:21:38 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
|
|
|
|
&N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDITemplateValueParameter(
|
|
|
|
const DITemplateValueParameter &N) {
|
|
|
|
visitDITemplateParameter(N);
|
2015-03-31 01:21:38 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
|
|
|
|
"invalid tag", &N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIVariable(const DIVariable &N) {
|
2015-03-28 01:29:58 +08:00
|
|
|
if (auto *S = N.getRawScope())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DIScope>(S), "invalid scope", &N, S);
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
|
2015-03-28 01:29:58 +08:00
|
|
|
if (auto *F = N.getRawFile())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DIFile>(F), "invalid file", &N, F);
|
2015-03-28 01:29:58 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
|
2015-03-28 01:29:58 +08:00
|
|
|
// Checks common to all variables.
|
2015-04-30 00:38:44 +08:00
|
|
|
visitDIVariable(N);
|
2015-03-28 01:29:58 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
|
2015-03-31 09:28:22 +08:00
|
|
|
Assert(!N.getName().empty(), "missing global variable name", &N);
|
2015-03-28 01:29:58 +08:00
|
|
|
if (auto *V = N.getRawVariable()) {
|
|
|
|
Assert(isa<ConstantAsMetadata>(V) &&
|
|
|
|
!isa<Function>(cast<ConstantAsMetadata>(V)->getValue()),
|
|
|
|
"invalid global varaible ref", &N, V);
|
2016-01-15 06:42:02 +08:00
|
|
|
visitConstantExprsRecursively(cast<ConstantAsMetadata>(V)->getValue());
|
2015-03-28 01:29:58 +08:00
|
|
|
}
|
|
|
|
if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DIDerivedType>(Member), "invalid static data member declaration",
|
2015-03-28 01:29:58 +08:00
|
|
|
&N, Member);
|
|
|
|
}
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDILocalVariable(const DILocalVariable &N) {
|
2015-03-28 01:29:58 +08:00
|
|
|
// Checks common to all variables.
|
2015-04-30 00:38:44 +08:00
|
|
|
visitDIVariable(N);
|
2015-03-28 01:29:58 +08:00
|
|
|
|
2015-08-01 02:58:39 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
|
2015-03-28 01:29:58 +08:00
|
|
|
"local variable requires a valid scope", &N, N.getRawScope());
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIExpression(const DIExpression &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.isValid(), "invalid expression", &N);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
|
2015-03-31 01:21:38 +08:00
|
|
|
if (auto *T = N.getRawType())
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isType(T), "invalid type ref", &N, T);
|
2015-03-31 01:21:38 +08:00
|
|
|
if (auto *F = N.getRawFile())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DIFile>(F), "invalid file", &N, F);
|
2015-02-10 09:40:40 +08:00
|
|
|
}
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N.getTag() == dwarf::DW_TAG_imported_module ||
|
|
|
|
N.getTag() == dwarf::DW_TAG_imported_declaration,
|
|
|
|
"invalid tag", &N);
|
2015-03-31 01:21:38 +08:00
|
|
|
if (auto *S = N.getRawScope())
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
|
2016-04-24 05:08:00 +08:00
|
|
|
Assert(isDINode(N.getRawEntity()), "invalid imported entity", &N,
|
|
|
|
N.getRawEntity());
|
2015-02-10 09:09:50 +08:00
|
|
|
}
|
|
|
|
|
2014-06-28 02:19:56 +08:00
|
|
|
void Verifier::visitComdat(const Comdat &C) {
|
2014-07-13 12:56:11 +08:00
|
|
|
// The Module is invalid if the GlobalValue has private linkage. Entities
|
|
|
|
// with private linkage don't have entries in the symbol table.
|
2015-02-21 03:58:48 +08:00
|
|
|
if (const GlobalValue *GV = M->getNamedValue(C.getName()))
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
|
|
|
|
GV);
|
2014-06-28 02:19:56 +08:00
|
|
|
}
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
void Verifier::visitModuleIdents(const Module &M) {
|
2013-10-16 09:49:05 +08:00
|
|
|
const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
|
|
|
|
if (!Idents)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// llvm.ident takes a list of metadata entry. Each entry has only one string.
|
|
|
|
// Scan each llvm.ident entry and make sure that this requirement is met.
|
2016-02-26 00:44:27 +08:00
|
|
|
for (const MDNode *N : Idents->operands()) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(N->getNumOperands() == 1,
|
|
|
|
"incorrect number of operands in llvm.ident metadata", N);
|
|
|
|
Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
|
|
|
|
("invalid value for llvm.ident metadata entry operand"
|
|
|
|
"(the operand should be a string)"),
|
|
|
|
N->getOperand(0));
|
2013-10-16 09:49:05 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
void Verifier::visitModuleFlags(const Module &M) {
|
2013-01-15 09:22:53 +08:00
|
|
|
const NamedMDNode *Flags = M.getModuleFlagsMetadata();
|
|
|
|
if (!Flags) return;
|
|
|
|
|
2013-01-16 04:52:06 +08:00
|
|
|
// Scan each flag, and track the flags and requirements.
|
2014-01-19 10:22:18 +08:00
|
|
|
DenseMap<const MDString*, const MDNode*> SeenIDs;
|
|
|
|
SmallVector<const MDNode*, 16> Requirements;
|
2016-02-26 00:44:27 +08:00
|
|
|
for (const MDNode *MDN : Flags->operands())
|
|
|
|
visitModuleFlag(MDN, SeenIDs, Requirements);
|
2013-01-16 04:52:06 +08:00
|
|
|
|
|
|
|
// Validate that the requirements in the module are valid.
|
2016-02-26 00:44:27 +08:00
|
|
|
for (const MDNode *Requirement : Requirements) {
|
2014-01-19 10:22:18 +08:00
|
|
|
const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
const Metadata *ReqValue = Requirement->getOperand(1);
|
2013-01-16 04:52:06 +08:00
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
const MDNode *Op = SeenIDs.lookup(Flag);
|
2013-01-16 04:52:06 +08:00
|
|
|
if (!Op) {
|
|
|
|
CheckFailed("invalid requirement on flag, flag is not present in module",
|
|
|
|
Flag);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Op->getOperand(2) != ReqValue) {
|
|
|
|
CheckFailed(("invalid requirement on flag, "
|
|
|
|
"flag does not have the required value"),
|
|
|
|
Flag);
|
|
|
|
continue;
|
|
|
|
}
|
2013-01-15 09:22:53 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
// Verify a single module-flag entry. Records the flag's ID in SeenIDs (for
// uniqueness checking) and appends 'require' payloads to Requirements so the
// caller can validate them once every flag has been scanned.
void
Verifier::visitModuleFlag(const MDNode *Op,
                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
                          SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three arguments, the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
  Assert(Op->getNumOperands() == 3,
         "incorrect number of operands in module flag", Op);
  Module::ModFlagBehavior MFB;
  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
    // Distinguish "not a constant int at all" from "a constant int outside
    // the known behavior range"; the first Assert falls through to the
    // second when the operand is a constant integer.
    Assert(
        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
        "invalid behavior operand in module flag (expected constant integer)",
        Op->getOperand(0));
    Assert(false,
           "invalid behavior operand in module flag (unexpected constant)",
           Op->getOperand(0));
  }
  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
  Assert(ID, "invalid ID operand in module flag (expected metadata string)",
         Op->getOperand(1));

  // Sanity check the values for behaviors with additional requirements.
  switch (MFB) {
  case Module::Error:
  case Module::Warning:
  case Module::Override:
    // These behavior types accept any value.
    break;

  case Module::Require: {
    // The value should itself be an MDNode with two operands, a flag ID (an
    // MDString), and a value.
    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
    Assert(Value && Value->getNumOperands() == 2,
           "invalid value for 'require' module flag (expected metadata pair)",
           Op->getOperand(2));
    Assert(isa<MDString>(Value->getOperand(0)),
           ("invalid value for 'require' module flag "
            "(first value operand should be a string)"),
           Value->getOperand(0));

    // Append it to the list of requirements, to check once all module flags are
    // scanned.
    Requirements.push_back(Value);
    break;
  }

  case Module::Append:
  case Module::AppendUnique: {
    // These behavior types require the operand be an MDNode.
    Assert(isa<MDNode>(Op->getOperand(2)),
           "invalid value for 'append'-type module flag "
           "(expected a metadata node)",
           Op->getOperand(2));
    break;
  }
  }

  // Unless this is a "requires" flag, check the ID is unique.
  if (MFB != Module::Require) {
    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
    Assert(Inserted,
           "module flag identifiers must be unique (or of 'require' type)", ID);
  }
}
|
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
// Verify that every enum attribute in slot Idx is legal for the position it
// appears in: function-only attributes may only appear when isFunction is
// true, and parameter/return attributes only when it is false. ReadOnly and
// ReadNone are additionally rejected on the return slot (Idx == 0). String
// attributes are free-form and skipped. V is printed in diagnostics.
void Verifier::verifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
                                    bool isFunction, const Value *V) {
  // Locate the slot within the attribute set that carries index Idx.
  unsigned Slot = ~0U;
  for (unsigned I = 0, E = Attrs.getNumSlots(); I != E; ++I)
    if (Attrs.getSlotIndex(I) == Idx) {
      Slot = I;
      break;
    }

  assert(Slot != ~0U && "Attribute set inconsistency!");

  for (AttributeSet::iterator I = Attrs.begin(Slot), E = Attrs.end(Slot);
       I != E; ++I) {
    // Target-dependent string attributes carry no positional constraints.
    if (I->isStringAttribute())
      continue;

    // Exhaustive list of attributes that are only valid on functions.
    if (I->getKindAsEnum() == Attribute::NoReturn ||
        I->getKindAsEnum() == Attribute::NoUnwind ||
        I->getKindAsEnum() == Attribute::NoInline ||
        I->getKindAsEnum() == Attribute::AlwaysInline ||
        I->getKindAsEnum() == Attribute::OptimizeForSize ||
        I->getKindAsEnum() == Attribute::StackProtect ||
        I->getKindAsEnum() == Attribute::StackProtectReq ||
        I->getKindAsEnum() == Attribute::StackProtectStrong ||
        I->getKindAsEnum() == Attribute::SafeStack ||
        I->getKindAsEnum() == Attribute::NoRedZone ||
        I->getKindAsEnum() == Attribute::NoImplicitFloat ||
        I->getKindAsEnum() == Attribute::Naked ||
        I->getKindAsEnum() == Attribute::InlineHint ||
        I->getKindAsEnum() == Attribute::StackAlignment ||
        I->getKindAsEnum() == Attribute::UWTable ||
        I->getKindAsEnum() == Attribute::NonLazyBind ||
        I->getKindAsEnum() == Attribute::ReturnsTwice ||
        I->getKindAsEnum() == Attribute::SanitizeAddress ||
        I->getKindAsEnum() == Attribute::SanitizeThread ||
        I->getKindAsEnum() == Attribute::SanitizeMemory ||
        I->getKindAsEnum() == Attribute::MinSize ||
        I->getKindAsEnum() == Attribute::NoDuplicate ||
        I->getKindAsEnum() == Attribute::Builtin ||
        I->getKindAsEnum() == Attribute::NoBuiltin ||
        I->getKindAsEnum() == Attribute::Cold ||
        I->getKindAsEnum() == Attribute::OptimizeNone ||
        I->getKindAsEnum() == Attribute::JumpTable ||
        I->getKindAsEnum() == Attribute::Convergent ||
        I->getKindAsEnum() == Attribute::ArgMemOnly ||
        I->getKindAsEnum() == Attribute::NoRecurse ||
        I->getKindAsEnum() == Attribute::InaccessibleMemOnly ||
        I->getKindAsEnum() == Attribute::InaccessibleMemOrArgMemOnly ||
        I->getKindAsEnum() == Attribute::AllocSize) {
      if (!isFunction) {
        CheckFailed("Attribute '" + I->getAsString() +
                    "' only applies to functions!", V);
        return;
      }
    } else if (I->getKindAsEnum() == Attribute::ReadOnly ||
               I->getKindAsEnum() == Attribute::ReadNone) {
      // readonly/readnone are valid on parameters and functions, but not on
      // the return value (Idx == 0).
      if (Idx == 0) {
        CheckFailed("Attribute '" + I->getAsString() +
                    "' does not apply to function returns");
        return;
      }
    } else if (isFunction) {
      // Anything not listed above is a parameter/return attribute.
      CheckFailed("Attribute '" + I->getAsString() +
                  "' does not apply to functions!", V);
      return;
    }
  }
}
|
|
|
|
|
2009-06-11 16:11:03 +08:00
|
|
|
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
// Rejects attributes that are illegal on return values, mutually exclusive
// attribute pairs, attributes incompatible with Ty, and byval/inalloca/
// swifterror misuse on non-pointer or unsized pointee types.
void Verifier::verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
                                    bool isReturnValue, const Value *V) {
  if (!Attrs.hasAttributes(Idx))
    return;

  // Positional legality (function-only vs. parameter-only) first.
  verifyAttributeTypes(Attrs, Idx, false, V);

  // These attribute kinds only make sense on arguments, never on the return.
  if (isReturnValue)
    Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
               !Attrs.hasAttribute(Idx, Attribute::Nest) &&
               !Attrs.hasAttribute(Idx, Attribute::StructRet) &&
               !Attrs.hasAttribute(Idx, Attribute::NoCapture) &&
               !Attrs.hasAttribute(Idx, Attribute::Returned) &&
               !Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
               !Attrs.hasAttribute(Idx, Attribute::SwiftSelf) &&
               !Attrs.hasAttribute(Idx, Attribute::SwiftError),
           "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
           "'returned', 'swiftself', and 'swifterror' do not apply to return "
           "values!",
           V);

  // Check for mutually incompatible attributes. Only inreg is compatible with
  // sret.
  unsigned AttrCount = 0;
  AttrCount += Attrs.hasAttribute(Idx, Attribute::ByVal);
  AttrCount += Attrs.hasAttribute(Idx, Attribute::InAlloca);
  AttrCount += Attrs.hasAttribute(Idx, Attribute::StructRet) ||
               Attrs.hasAttribute(Idx, Attribute::InReg);
  AttrCount += Attrs.hasAttribute(Idx, Attribute::Nest);
  Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
                         "and 'sret' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
           Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
         "Attributes "
         "'inalloca and readonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::StructRet) &&
           Attrs.hasAttribute(Idx, Attribute::Returned)),
         "Attributes "
         "'sret and returned' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::ZExt) &&
           Attrs.hasAttribute(Idx, Attribute::SExt)),
         "Attributes "
         "'zeroext and signext' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) &&
           Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
         "Attributes "
         "'readnone and readonly' are incompatible!",
         V);

  Assert(!(Attrs.hasAttribute(Idx, Attribute::NoInline) &&
           Attrs.hasAttribute(Idx, Attribute::AlwaysInline)),
         "Attributes "
         "'noinline and alwaysinline' are incompatible!",
         V);

  // Reject attributes that AttributeFuncs declares incompatible with Ty
  // (e.g. pointer-only attributes on non-pointer types).
  Assert(!AttrBuilder(Attrs, Idx)
              .overlaps(AttributeFuncs::typeIncompatible(Ty)),
         "Wrong types for attribute: " +
             AttributeSet::get(*Context, Idx,
                               AttributeFuncs::typeIncompatible(Ty)).getAsString(Idx),
         V);

  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
    SmallPtrSet<Type*, 4> Visited;
    if (!PTy->getElementType()->isSized(&Visited)) {
      // byval/inalloca imply a copy of the pointee, so its size must be known.
      Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
                 !Attrs.hasAttribute(Idx, Attribute::InAlloca),
             "Attributes 'byval' and 'inalloca' do not support unsized types!",
             V);
    }
    // swifterror needs a pointer-to-pointer argument.
    if (!isa<PointerType>(PTy->getElementType()))
      Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError),
             "Attribute 'swifterror' only applies to parameters "
             "with pointer to pointer type!",
             V);
  } else {
    Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal),
           "Attribute 'byval' only applies to parameters with pointer type!",
           V);
    Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError),
           "Attribute 'swifterror' only applies to parameters "
           "with pointer type!",
           V);
  }
}
|
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
// Check parameter attributes against a function type.
// The value V is printed in error messages.
// Walks every attribute slot: per-parameter/return slots are delegated to
// verifyParameterAttrs with once-per-function checks (nest, returned, sret,
// swiftself, swifterror, inalloca) layered on top, then the function-index
// slot is checked for incompatible combinations and allocsize validity.
void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
                                   const Value *V) {
  if (Attrs.isEmpty())
    return;

  // Attributes that may appear on at most one parameter each.
  bool SawNest = false;
  bool SawReturned = false;
  bool SawSRet = false;
  bool SawSwiftSelf = false;
  bool SawSwiftError = false;

  for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
    unsigned Idx = Attrs.getSlotIndex(i);

    // Map the slot index to the type it annotates: 0 is the return value,
    // 1..NumParams are parameters; anything past that is varargs territory.
    Type *Ty;
    if (Idx == 0)
      Ty = FT->getReturnType();
    else if (Idx-1 < FT->getNumParams())
      Ty = FT->getParamType(Idx-1);
    else
      break;  // VarArgs attributes, verified elsewhere.

    verifyParameterAttrs(Attrs, Idx, Ty, Idx == 0, V);

    // The uniqueness checks below only apply to real parameters.
    if (Idx == 0)
      continue;

    if (Attrs.hasAttribute(Idx, Attribute::Nest)) {
      Assert(!SawNest, "More than one parameter has attribute nest!", V);
      SawNest = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::Returned)) {
      Assert(!SawReturned, "More than one parameter has attribute returned!",
             V);
      // A 'returned' argument must be convertible to the return type.
      Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
             "Incompatible "
             "argument and return types for 'returned' attribute",
             V);
      SawReturned = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::StructRet)) {
      Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
      Assert(Idx == 1 || Idx == 2,
             "Attribute 'sret' is not on first or second parameter!", V);
      SawSRet = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::SwiftSelf)) {
      Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
      SawSwiftSelf = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::SwiftError)) {
      Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
             V);
      SawSwiftError = true;
    }

    if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) {
      Assert(Idx == FT->getNumParams(), "inalloca isn't on the last parameter!",
             V);
    }
  }

  if (!Attrs.hasAttributes(AttributeSet::FunctionIndex))
    return;

  verifyAttributeTypes(Attrs, AttributeSet::FunctionIndex, true, V);

  // Mutually exclusive function-level attribute combinations.
  Assert(
      !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
        Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly)),
      "Attributes 'readnone and readonly' are incompatible!", V);

  Assert(
      !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
        Attrs.hasAttribute(AttributeSet::FunctionIndex,
                           Attribute::InaccessibleMemOrArgMemOnly)),
      "Attributes 'readnone and inaccessiblemem_or_argmemonly' are incompatible!", V);

  Assert(
      !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
        Attrs.hasAttribute(AttributeSet::FunctionIndex,
                           Attribute::InaccessibleMemOnly)),
      "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);

  Assert(
      !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline) &&
        Attrs.hasAttribute(AttributeSet::FunctionIndex,
                           Attribute::AlwaysInline)),
      "Attributes 'noinline and alwaysinline' are incompatible!", V);

  if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
                         Attribute::OptimizeNone)) {
    // optnone implies noinline and forbids size optimizations.
    Assert(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline),
           "Attribute 'optnone' requires 'noinline'!", V);

    Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex,
                               Attribute::OptimizeForSize),
           "Attributes 'optsize and optnone' are incompatible!", V);

    Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize),
           "Attributes 'minsize and optnone' are incompatible!", V);
  }

  if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
                         Attribute::JumpTable)) {
    const GlobalValue *GV = cast<GlobalValue>(V);
    Assert(GV->hasUnnamedAddr(),
           "Attribute 'jumptable' requires 'unnamed_addr'", V);
  }

  if (Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::AllocSize)) {
    std::pair<unsigned, Optional<unsigned>> Args =
        Attrs.getAllocSizeArgs(AttributeSet::FunctionIndex);

    // Both allocsize indices must name an existing integer parameter.
    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
      if (ParamNo >= FT->getNumParams()) {
        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
        return false;
      }

      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
        CheckFailed("'allocsize' " + Name +
                        " argument must refer to an integer parameter",
                    V);
        return false;
      }

      return true;
    };

    if (!CheckParam("element size", Args.first))
      return;

    if (Args.second && !CheckParam("number of elements", *Args.second))
      return;
  }
}
|
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
void Verifier::verifyFunctionMetadata(
|
2015-05-13 23:13:45 +08:00
|
|
|
const SmallVector<std::pair<unsigned, MDNode *>, 4> MDs) {
|
|
|
|
if (MDs.empty())
|
|
|
|
return;
|
|
|
|
|
2016-02-26 00:44:27 +08:00
|
|
|
for (const auto &Pair : MDs) {
|
|
|
|
if (Pair.first == LLVMContext::MD_prof) {
|
|
|
|
MDNode *MD = Pair.second;
|
2015-05-13 23:13:45 +08:00
|
|
|
Assert(MD->getNumOperands() == 2,
|
|
|
|
"!prof annotations should have exactly 2 operands", MD);
|
|
|
|
|
|
|
|
// Check first operand.
|
|
|
|
Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
|
|
|
|
MD);
|
|
|
|
Assert(isa<MDString>(MD->getOperand(0)),
|
|
|
|
"expected string with name of the !prof annotation", MD);
|
|
|
|
MDString *MDS = cast<MDString>(MD->getOperand(0));
|
|
|
|
StringRef ProfName = MDS->getString();
|
|
|
|
Assert(ProfName.equals("function_entry_count"),
|
|
|
|
"first operand should be 'function_entry_count'", MD);
|
|
|
|
|
|
|
|
// Check second operand.
|
|
|
|
Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
|
|
|
|
MD);
|
|
|
|
Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
|
|
|
|
"expected integer argument to function_entry_count", MD);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-11 01:56:06 +08:00
|
|
|
void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
|
|
|
|
if (!ConstantExprVisited.insert(EntryC).second)
|
|
|
|
return;
|
|
|
|
|
|
|
|
SmallVector<const Constant *, 16> Stack;
|
|
|
|
Stack.push_back(EntryC);
|
|
|
|
|
|
|
|
while (!Stack.empty()) {
|
|
|
|
const Constant *C = Stack.pop_back_val();
|
|
|
|
|
|
|
|
// Check this constant expression.
|
|
|
|
if (const auto *CE = dyn_cast<ConstantExpr>(C))
|
|
|
|
visitConstantExpr(CE);
|
|
|
|
|
2016-01-15 06:42:02 +08:00
|
|
|
if (const auto *GV = dyn_cast<GlobalValue>(C)) {
|
|
|
|
// Global Values get visited separately, but we do need to make sure
|
|
|
|
// that the global value is in the correct module
|
|
|
|
Assert(GV->getParent() == M, "Referencing global in another module!",
|
|
|
|
EntryC, M, GV, GV->getParent());
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2015-12-11 01:56:06 +08:00
|
|
|
// Visit all sub-expressions.
|
|
|
|
for (const Use &U : C->operands()) {
|
|
|
|
const auto *OpC = dyn_cast<Constant>(U);
|
|
|
|
if (!OpC)
|
|
|
|
continue;
|
|
|
|
if (!ConstantExprVisited.insert(OpC).second)
|
|
|
|
continue;
|
|
|
|
Stack.push_back(OpC);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Verifier::visitConstantExpr(const ConstantExpr *CE) {
|
2014-12-17 03:29:29 +08:00
|
|
|
if (CE->getOpcode() != Instruction::BitCast)
|
2013-08-01 01:49:08 +08:00
|
|
|
return;
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
|
|
|
|
CE->getType()),
|
|
|
|
"Invalid bitcast", CE);
|
2013-08-01 01:49:08 +08:00
|
|
|
}
|
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
// Return true if no attribute slot refers to an index beyond the function's
// Params parameters (the function-index slot is always permitted).
bool Verifier::verifyAttributeCount(AttributeSet Attrs, unsigned Params) {
  const unsigned NumSlots = Attrs.getNumSlots();
  if (NumSlots == 0)
    return true;

  // Slots are ordered by index, so only the trailing slots can be too large.
  const unsigned LastSlot = NumSlots - 1;
  const unsigned LastIndex = Attrs.getSlotIndex(LastSlot);
  if (LastIndex <= Params)
    return true;

  // The function-index slot is fine as long as the slot before it (if any)
  // stays within range.
  if (LastIndex == AttributeSet::FunctionIndex)
    return LastSlot == 0 || Attrs.getSlotIndex(LastSlot - 1) <= Params;

  return false;
}
|
2009-09-08 04:44:51 +08:00
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
/// Verify that statepoint intrinsic is well formed.
|
|
|
|
void Verifier::verifyStatepoint(ImmutableCallSite CS) {
|
2015-02-04 07:18:47 +08:00
|
|
|
assert(CS.getCalledFunction() &&
|
|
|
|
CS.getCalledFunction()->getIntrinsicID() ==
|
|
|
|
Intrinsic::experimental_gc_statepoint);
|
2015-01-31 07:28:05 +08:00
|
|
|
|
2015-02-04 07:18:47 +08:00
|
|
|
const Instruction &CI = *CS.getInstruction();
|
|
|
|
|
2015-07-11 18:30:36 +08:00
|
|
|
Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
|
|
|
|
!CS.onlyAccessesArgMemory(),
|
|
|
|
"gc.statepoint must read and write all memory to preserve "
|
2015-03-08 05:15:40 +08:00
|
|
|
"reordering restrictions required by safepoint semantics",
|
|
|
|
&CI);
|
|
|
|
|
2015-05-13 07:52:24 +08:00
|
|
|
const Value *IDV = CS.getArgument(0);
|
|
|
|
Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
|
|
|
|
&CI);
|
|
|
|
|
|
|
|
const Value *NumPatchBytesV = CS.getArgument(1);
|
|
|
|
Assert(isa<ConstantInt>(NumPatchBytesV),
|
|
|
|
"gc.statepoint number of patchable bytes must be a constant integer",
|
|
|
|
&CI);
|
2015-05-14 04:11:59 +08:00
|
|
|
const int64_t NumPatchBytes =
|
2015-05-13 07:52:24 +08:00
|
|
|
cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
|
|
|
|
assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
|
|
|
|
Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
|
|
|
|
"positive",
|
|
|
|
&CI);
|
|
|
|
|
|
|
|
const Value *Target = CS.getArgument(2);
|
2015-08-02 06:20:21 +08:00
|
|
|
auto *PT = dyn_cast<PointerType>(Target->getType());
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(PT && PT->getElementType()->isFunctionTy(),
|
|
|
|
"gc.statepoint callee must be of function pointer type", &CI, Target);
|
2015-05-09 08:02:06 +08:00
|
|
|
FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
|
2015-01-31 07:28:05 +08:00
|
|
|
|
2015-05-13 07:52:24 +08:00
|
|
|
const Value *NumCallArgsV = CS.getArgument(3);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(isa<ConstantInt>(NumCallArgsV),
|
|
|
|
"gc.statepoint number of arguments to underlying call "
|
|
|
|
"must be constant integer",
|
|
|
|
&CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(NumCallArgs >= 0,
|
|
|
|
"gc.statepoint number of arguments to underlying call "
|
|
|
|
"must be positive",
|
|
|
|
&CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
const int NumParams = (int)TargetFuncType->getNumParams();
|
|
|
|
if (TargetFuncType->isVarArg()) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(NumCallArgs >= NumParams,
|
|
|
|
"gc.statepoint mismatch in number of vararg call args", &CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
|
|
|
|
// TODO: Remove this limitation
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(TargetFuncType->getReturnType()->isVoidTy(),
|
|
|
|
"gc.statepoint doesn't support wrapping non-void "
|
|
|
|
"vararg functions yet",
|
|
|
|
&CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
} else
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(NumCallArgs == NumParams,
|
|
|
|
"gc.statepoint mismatch in number of call args", &CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
|
2015-05-13 07:52:24 +08:00
|
|
|
const Value *FlagsV = CS.getArgument(4);
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-09 02:07:42 +08:00
|
|
|
Assert(isa<ConstantInt>(FlagsV),
|
|
|
|
"gc.statepoint flags must be constant integer", &CI);
|
|
|
|
const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
|
|
|
|
Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
|
|
|
|
"unknown flag used in gc.statepoint flags argument", &CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
|
|
|
|
// Verify that the types of the call parameter arguments match
|
|
|
|
// the type of the wrapped callee.
|
|
|
|
for (int i = 0; i < NumParams; i++) {
|
|
|
|
Type *ParamType = TargetFuncType->getParamType(i);
|
2015-05-13 07:52:24 +08:00
|
|
|
Type *ArgType = CS.getArgument(5 + i)->getType();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(ArgType == ParamType,
|
|
|
|
"gc.statepoint call argument does not match wrapped "
|
|
|
|
"function type",
|
|
|
|
&CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
}
|
2015-05-13 07:52:24 +08:00
|
|
|
|
|
|
|
const int EndCallArgsInx = 4 + NumCallArgs;
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-09 02:07:42 +08:00
|
|
|
|
|
|
|
const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
|
|
|
|
Assert(isa<ConstantInt>(NumTransitionArgsV),
|
|
|
|
"gc.statepoint number of transition arguments "
|
|
|
|
"must be constant integer",
|
|
|
|
&CI);
|
|
|
|
const int NumTransitionArgs =
|
|
|
|
cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
|
|
|
|
Assert(NumTransitionArgs >= 0,
|
|
|
|
"gc.statepoint number of transition arguments must be positive", &CI);
|
|
|
|
const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
|
|
|
|
|
|
|
|
const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(isa<ConstantInt>(NumDeoptArgsV),
|
|
|
|
"gc.statepoint number of deoptimization arguments "
|
|
|
|
"must be constant integer",
|
|
|
|
&CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
|
|
|
|
"must be positive",
|
|
|
|
&CI);
|
|
|
|
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-09 02:07:42 +08:00
|
|
|
const int ExpectedNumArgs =
|
2015-05-13 07:52:24 +08:00
|
|
|
7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-09 02:07:42 +08:00
|
|
|
Assert(ExpectedNumArgs <= (int)CS.arg_size(),
|
2015-03-08 05:15:40 +08:00
|
|
|
"gc.statepoint too few arguments according to length fields", &CI);
|
2015-01-31 07:28:05 +08:00
|
|
|
|
|
|
|
// Check that the only uses of this gc.statepoint are gc.result or
|
|
|
|
// gc.relocate calls which are tied to this statepoint and thus part
|
|
|
|
// of the same statepoint sequence
|
2015-02-04 07:18:47 +08:00
|
|
|
for (const User *U : CI.users()) {
|
2015-01-31 07:28:05 +08:00
|
|
|
const CallInst *Call = dyn_cast<const CallInst>(U);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(Call, "illegal use of statepoint token", &CI, U);
|
2015-01-31 07:28:05 +08:00
|
|
|
if (!Call) continue;
|
2016-04-13 02:05:10 +08:00
|
|
|
Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call),
|
2015-03-08 05:15:40 +08:00
|
|
|
"gc.result or gc.relocate are the only value uses"
|
|
|
|
"of a gc.statepoint",
|
|
|
|
&CI, U);
|
2016-04-13 02:05:10 +08:00
|
|
|
if (isa<GCResultInst>(Call)) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(Call->getArgOperand(0) == &CI,
|
|
|
|
"gc.result connected to wrong gc.statepoint", &CI, Call);
|
2016-01-05 12:03:00 +08:00
|
|
|
} else if (isa<GCRelocateInst>(Call)) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(Call->getArgOperand(0) == &CI,
|
|
|
|
"gc.relocate connected to wrong gc.statepoint", &CI, Call);
|
2015-01-31 07:28:05 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Note: It is legal for a single derived pointer to be listed multiple
|
|
|
|
// times. It's non-optimal, but it is legal. It can also happen after
|
|
|
|
// insertion if we strip a bitcast away.
|
|
|
|
// Note: It is really tempting to check that each base is relocated and
|
|
|
|
// that a derived pointer is never reused as a base pointer. This turns
|
|
|
|
// out to be problematic since optimizations run after safepoint insertion
|
|
|
|
// can recognize equality properties that the insertion logic doesn't know
|
|
|
|
// about. See example statepoint.ll in the verifier subdirectory
|
|
|
|
}
|
|
|
|
|
2015-03-06 02:26:34 +08:00
|
|
|
void Verifier::verifyFrameRecoverIndices() {
|
|
|
|
for (auto &Counts : FrameEscapeInfo) {
|
|
|
|
Function *F = Counts.first;
|
|
|
|
unsigned EscapedObjectCount = Counts.second.first;
|
|
|
|
unsigned MaxRecoveredIndex = Counts.second.second;
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(MaxRecoveredIndex <= EscapedObjectCount,
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
"all indices passed to llvm.localrecover must be less than the "
|
|
|
|
"number of arguments passed ot llvm.localescape in the parent "
|
2015-03-08 05:15:40 +08:00
|
|
|
"function",
|
|
|
|
F);
|
2015-03-06 02:26:34 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-10 12:31:05 +08:00
|
|
|
/// Return the first EH pad instruction of the unwind destination reached by
/// \p Terminator (an invoke, catchswitch, or cleanupret).
static Instruction *getSuccPad(TerminatorInst *Terminator) {
  // Each of the three supported terminators exposes its unwind destination
  // through a differently-named accessor; a cleanupret is the fallback case.
  BasicBlock *UnwindDest;
  if (auto *Invoke = dyn_cast<InvokeInst>(Terminator))
    UnwindDest = Invoke->getUnwindDest();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator))
    UnwindDest = CatchSwitch->getUnwindDest();
  else
    UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
  return UnwindDest->getFirstNonPHI();
}
|
|
|
|
|
|
|
|
/// Verify that no two sibling EH funclets unwind to each other.
///
/// SiblingFuncletInfo maps each funclet pad to the terminator that exits it
/// toward a sibling.  Since every such pad has exactly one successor pad,
/// the map forms chains; any chain that revisits an active node is a cycle,
/// which would mean the pads "handle each other's exceptions" — illegal.
void Verifier::verifySiblingFuncletUnwinds() {
  // Visited: pads fully processed across all walks.
  // Active: pads on the chain currently being walked (cycle detection).
  SmallPtrSet<Instruction *, 8> Visited;
  SmallPtrSet<Instruction *, 8> Active;
  for (const auto &Pair : SiblingFuncletInfo) {
    Instruction *PredPad = Pair.first;
    if (Visited.count(PredPad))
      continue;
    Active.insert(PredPad);
    TerminatorInst *Terminator = Pair.second;
    do {
      Instruction *SuccPad = getSuccPad(Terminator);
      if (Active.count(SuccPad)) {
        // Found a cycle; report error
        // Re-trace the cycle to collect all pads (and their terminators,
        // when distinct) for the diagnostic.
        Instruction *CyclePad = SuccPad;
        SmallVector<Instruction *, 8> CycleNodes;
        do {
          CycleNodes.push_back(CyclePad);
          TerminatorInst *CycleTerminator = SiblingFuncletInfo[CyclePad];
          if (CycleTerminator != CyclePad)
            CycleNodes.push_back(CycleTerminator);
          CyclePad = getSuccPad(CycleTerminator);
        } while (CyclePad != SuccPad);
        // NOTE: Assert records the failure; the walk continues afterward and
        // terminates once the cycle's nodes land in Visited below.
        Assert(false, "EH pads can't handle each other's exceptions",
               ArrayRef<Instruction *>(CycleNodes));
      }
      // Don't re-walk a node we've already checked
      if (!Visited.insert(SuccPad).second)
        break;
      // Walk to this successor if it has a map entry.
      PredPad = SuccPad;
      auto TermI = SiblingFuncletInfo.find(PredPad);
      if (TermI == SiblingFuncletInfo.end())
        break;
      Terminator = TermI->second;
      Active.insert(PredPad);
    } while (true);
    // Each node only has one successor, so we've walked all the active
    // nodes' successors.
    Active.clear();
  }
}
|
|
|
|
|
2002-04-19 04:37:37 +08:00
|
|
|
// visitFunction - Verify that a function is ok.  Checks the signature against
// its FunctionType, per-argument constraints, attributes, calling-convention
// restrictions, metadata attachments, and debug-info consistency.
//
void Verifier::visitFunction(const Function &F) {
  // Check function arguments.
  FunctionType *FT = F.getFunctionType();
  unsigned NumArgs = F.arg_size();

  Assert(Context == &F.getContext(),
         "Function context does not match Module context!", &F);

  Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
  Assert(FT->getNumParams() == NumArgs,
         "# formal arguments must match # of arguments for function type!", &F,
         FT);
  Assert(F.getReturnType()->isFirstClassType() ||
             F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
         "Functions cannot return aggregate values!", &F);

  // sret implies the function returns via a pointer argument, so the actual
  // return type must be void.
  Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
         "Invalid struct return type!", &F);

  AttributeSet Attrs = F.getAttributes();

  Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
         "Attribute after last parameter!", &F);

  // Check function attributes.
  verifyFunctionAttrs(FT, Attrs, &F);

  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in VerifyFunctionAttrs since that is
  // checking for Attributes that can/can not ever be on functions.
  Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::Builtin),
         "Attribute 'builtin' can only be applied to a callsite.", &F);

  // Check that this function meets the restrictions on this calling convention.
  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
  // restrictions can be lifted.
  switch (F.getCallingConv()) {
  default:
  case CallingConv::C:
    break;
  case CallingConv::Fast:
  case CallingConv::Cold:
  case CallingConv::Intel_OCL_BI:
  case CallingConv::PTX_Kernel:
  case CallingConv::PTX_Device:
    Assert(!F.isVarArg(), "Calling convention does not support varargs or "
                          "perfect forwarding!",
           &F);
    break;
  }

  // Intrinsics (names starting with "llvm.") are exempt from several of the
  // checks below (metadata/token parameters, and must not have bodies).
  bool isLLVMdotName = F.getName().size() >= 5 &&
                       F.getName().substr(0, 5) == "llvm.";

  // Check that the argument values match the function type for this function...
  unsigned i = 0;
  for (const Argument &Arg : F.args()) {
    Assert(Arg.getType() == FT->getParamType(i),
           "Argument value does not match function argument type!", &Arg,
           FT->getParamType(i));
    Assert(Arg.getType()->isFirstClassType(),
           "Function arguments must have first-class types!", &Arg);
    if (!isLLVMdotName) {
      Assert(!Arg.getType()->isMetadataTy(),
             "Function takes metadata but isn't an intrinsic", &Arg, &F);
      Assert(!Arg.getType()->isTokenTy(),
             "Function takes token but isn't an intrinsic", &Arg, &F);
    }

    // Check that swifterror argument is only used by loads and stores.
    // (i+1 because parameter attribute indices are 1-based; index 0 is the
    // return value — TODO confirm against AttributeSet docs.)
    if (Attrs.hasAttribute(i+1, Attribute::SwiftError)) {
      verifySwiftErrorValue(&Arg);
    }
    ++i;
  }

  if (!isLLVMdotName)
    Assert(!F.getReturnType()->isTokenTy(),
           "Functions returns a token but isn't an intrinsic", &F);

  // Get the function metadata attachments.
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  F.getAllMetadata(MDs);
  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
  verifyFunctionMetadata(MDs);

  // Check validity of the personality function
  if (F.hasPersonalityFn()) {
    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
    if (Per)
      Assert(Per->getParent() == F.getParent(),
             "Referencing personality function in another module!",
             &F, F.getParent(), Per, Per->getParent());
  }

  if (F.isMaterializable()) {
    // Function has a body somewhere we can't see.
    Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
           MDs.empty() ? nullptr : MDs.front().second);
  } else if (F.isDeclaration()) {
    Assert(F.hasExternalLinkage() || F.hasExternalWeakLinkage(),
           "invalid linkage type for function declaration", &F);
    Assert(MDs.empty(), "function without a body cannot have metadata", &F,
           MDs.empty() ? nullptr : MDs.front().second);
    Assert(!F.hasPersonalityFn(),
           "Function declaration shouldn't have a personality routine", &F);
  } else {
    // Verify that this function (which has a body) is not named "llvm.*". It
    // is not legal to define intrinsics.
    Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);

    // Check the entry node
    const BasicBlock *Entry = &F.getEntryBlock();
    Assert(pred_empty(Entry),
           "Entry block to function must not have predecessors!", Entry);

    // The address of the entry block cannot be taken, unless it is dead.
    if (Entry->hasAddressTaken()) {
      Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
             "blockaddress may not be used with the entry block!", Entry);
    }

    // Visit metadata attachments.
    for (const auto &I : MDs) {
      // Verify that the attachment is legal.
      switch (I.first) {
      default:
        break;
      case LLVMContext::MD_dbg:
        Assert(isa<DISubprogram>(I.second),
               "function !dbg attachment must be a subprogram", &F, I.second);
        break;
      }

      // Verify the metadata itself.
      visitMDNode(*I.second);
    }
  }

  // If this function is actually an intrinsic, verify that it is only used in
  // direct call/invokes, never having its "address taken".
  // Only do this if the module is materialized, otherwise we don't have all the
  // uses.
  if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
    const User *U;
    if (F.hasAddressTaken(&U))
      Assert(0, "Invalid user of intrinsic instruction!", U);
  }

  Assert(!F.hasDLLImportStorageClass() ||
             (F.isDeclaration() && F.hasExternalLinkage()) ||
             F.hasAvailableExternallyLinkage(),
         "Function is marked as dllimport, but not external.", &F);

  // Remaining checks only apply to functions with debug info attached.
  auto *N = F.getSubprogram();
  if (!N)
    return;

  visitDISubprogram(*N);

  // Check that all !dbg attachments lead back to N (or, at least, another
  // subprogram that describes the same function).
  //
  // FIXME: Check this incrementally while visiting !dbg attachments.
  // FIXME: Only check when N is the canonical subprogram for F.
  SmallPtrSet<const MDNode *, 32> Seen;
  for (auto &BB : F)
    for (auto &I : BB) {
      // Be careful about using DILocation here since we might be dealing with
      // broken code (this is the Verifier after all).
      DILocation *DL =
          dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode());
      if (!DL)
        continue;
      if (!Seen.insert(DL).second)
        continue;

      // Skip scopes (and below, subprograms) already validated via an
      // earlier location.
      DILocalScope *Scope = DL->getInlinedAtScope();
      if (Scope && !Seen.insert(Scope).second)
        continue;

      DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;

      // Scope and SP could be the same MDNode and we don't want to skip
      // validation in that case
      if (SP && ((Scope != SP) && !Seen.insert(SP).second))
        continue;

      // FIXME: Once N is canonical, check "SP == &N".
      // NOTE(review): SP appears to be assumed non-null here (it is only
      // null when Scope is null) — confirm getInlinedAtScope() cannot
      // return null for a valid DILocation.
      Assert(SP->describes(&F),
             "!dbg attachment points at wrong subprogram for function", N, &F,
             &I, DL, Scope, SP);
    }
}
|
|
|
|
|
|
|
|
// verifyBasicBlock - Verify that a basic block is well formed: it must end in
// a terminator, its PHI nodes must exactly mirror its predecessor list, and
// every contained instruction must point back at this block as its parent.
//
void Verifier::visitBasicBlock(BasicBlock &BB) {
  // Per-block scratch state used by visitInstruction's dominance checks.
  InstsInThisBlock.clear();

  // Ensure that basic blocks have terminators!
  Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);

  // Check constraints that this basic block imposes on all of the PHI nodes in
  // it.  PHIs may only appear at the top of the block, so testing the front
  // instruction suffices to detect their presence.
  if (isa<PHINode>(BB.front())) {
    SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
    SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
    // Sorting Preds once lets each PHI's (sorted) incoming list be compared
    // against it positionally.
    std::sort(Preds.begin(), Preds.end());
    PHINode *PN;
    for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I));++I) {
      // Ensure that PHI nodes have at least one entry!
      Assert(PN->getNumIncomingValues() != 0,
             "PHI nodes must have at least one entry.  If the block is dead, "
             "the PHI should be removed!",
             PN);
      Assert(PN->getNumIncomingValues() == Preds.size(),
             "PHINode should have one entry for each predecessor of its "
             "parent basic block!",
             PN);

      // Get and sort all incoming values in the PHI node...
      Values.clear();
      Values.reserve(PN->getNumIncomingValues());
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Values.push_back(std::make_pair(PN->getIncomingBlock(i),
                                        PN->getIncomingValue(i)));
      // Sorting by (block, value) groups duplicate-block entries together,
      // enabling the adjacent-pair checks below.
      std::sort(Values.begin(), Values.end());

      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
        // Check to make sure that if there is more than one entry for a
        // particular basic block in this PHI node, that the incoming values are
        // all identical.
        //
        Assert(i == 0 || Values[i].first != Values[i - 1].first ||
                   Values[i].second == Values[i - 1].second,
               "PHI node has multiple entries for the same basic block with "
               "different incoming values!",
               PN, Values[i].first, Values[i].second, Values[i - 1].second);

        // Check to make sure that the predecessors and PHI node entries are
        // matched up.
        Assert(Values[i].first == Preds[i],
               "PHI node entries do not match predecessors!", PN,
               Values[i].first, Preds[i]);
      }
    }
  }

  // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
  {
    Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
  }
}
|
|
|
|
|
|
|
|
/// Common checks for every terminator instruction, then fall through to the
/// generic instruction checks.
void Verifier::visitTerminatorInst(TerminatorInst &I) {
  // A terminator is only legal as the final instruction of its block.
  BasicBlock *Parent = I.getParent();
  Assert(Parent->getTerminator() == &I,
         "Terminator found in the middle of a basic block!", Parent);
  visitInstruction(I);
}
|
|
|
|
|
2010-02-16 06:09:09 +08:00
|
|
|
/// Verify a branch instruction: a conditional branch must be predicated on
/// an i1 value.  Unconditional branches carry no condition to validate.
void Verifier::visitBranchInst(BranchInst &BI) {
  if (BI.isConditional())
    Assert(BI.getCondition()->getType()->isIntegerTy(1),
           "Branch condition is not 'i1' type!", &BI, BI.getCondition());
  visitTerminatorInst(BI);
}
|
|
|
|
|
2002-06-25 23:56:27 +08:00
|
|
|
/// Verify a return instruction: its operand count and operand type must
/// agree with the enclosing function's declared return type.
void Verifier::visitReturnInst(ReturnInst &RI) {
  Function *F = RI.getParent()->getParent();
  Type *RetTy = F->getReturnType();
  unsigned NumOps = RI.getNumOperands();
  if (RetTy->isVoidTy())
    Assert(NumOps == 0,
           "Found return instr that returns non-void in Function of void "
           "return type!",
           &RI, RetTy);
  else
    Assert(NumOps == 1 && RetTy == RI.getOperand(0)->getType(),
           "Function return type does not match operand "
           "type of return inst!",
           &RI, RetTy);

  // Check to make sure that the return value has necessary properties for
  // terminators...
  visitTerminatorInst(RI);
}
|
|
|
|
|
2004-05-22 00:47:21 +08:00
|
|
|
void Verifier::visitSwitchInst(SwitchInst &SI) {
  // Every case constant must have the same type as the switched-on value,
  // and no two cases may share the same constant.
  Type *CondTy = SI.getCondition()->getType();
  SmallPtrSet<ConstantInt *, 32> SeenValues;
  for (auto &Case : SI.cases()) {
    Assert(Case.getCaseValue()->getType() == CondTy,
           "Switch constants must all be same type as switch value!", &SI);
    Assert(SeenValues.insert(Case.getCaseValue()).second,
           "Duplicate integer as switch case", &SI, Case.getCaseValue());
  }

  visitTerminatorInst(SI);
}
|
|
|
|
|
2010-08-03 07:08:33 +08:00
|
|
|
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
  // The address operand is a pointer; each destination is a basic block
  // (label type).
  Assert(BI.getAddress()->getType()->isPointerTy(),
         "Indirectbr operand must have pointer type!", &BI);
  // NOTE: the predicate checks isLabelTy(); the old diagnostic incorrectly
  // said "pointer type". The message now matches the check.
  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
    Assert(BI.getDestination(i)->getType()->isLabelTy(),
           "Indirectbr destinations must all have label type!", &BI);

  visitTerminatorInst(BI);
}
|
|
|
|
|
2004-03-12 13:54:31 +08:00
|
|
|
void Verifier::visitSelectInst(SelectInst &SI) {
  // SelectInst itself knows the legality rules for its three operands
  // (i1 or vector-of-i1 condition, matching true/false value types).
  Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
                                         SI.getOperand(2)),
         "Invalid operands for select instruction!", &SI);

  Assert(SI.getTrueValue()->getType() == SI.getType(),
         "Select values must have same type as select instruction!", &SI);
  visitInstruction(SI);
}
|
|
|
|
|
2004-03-02 08:22:19 +08:00
|
|
|
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
|
|
|
|
/// a pass, if any exist, it's an error.
|
|
|
|
///
|
2002-11-22 00:54:22 +08:00
|
|
|
void Verifier::visitUserOp1(Instruction &I) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(0, "User-defined operators should not live outside of a pass!", &I);
|
2002-11-22 00:54:22 +08:00
|
|
|
}
|
2001-06-07 04:29:01 +08:00
|
|
|
|
2006-11-27 09:05:10 +08:00
|
|
|
void Verifier::visitTruncInst(TruncInst &I) {
  // Source/destination types and their scalar widths.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DestBits = DestTy->getScalarSizeInBits();

  // trunc: integer -> strictly narrower integer, vector-ness preserved.
  Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
  Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
         "trunc source and destination must both be a vector or neither", &I);
  Assert(SrcBits > DestBits, "DestTy too big for Trunc", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitZExtInst(ZExtInst &I) {
  // Source and destination types.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  // zext: integer -> strictly wider integer, vector-ness preserved.
  Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
  Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
         "zext source and destination must both be a vector or neither", &I);

  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DestBits = DestTy->getScalarSizeInBits();
  Assert(SrcBits < DestBits, "Type too small for ZExt", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitSExtInst(SExtInst &I) {
  // Source/destination types and their scalar widths.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DestBits = DestTy->getScalarSizeInBits();

  // sext: integer -> strictly wider integer, vector-ness preserved.
  Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
  Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
         "sext source and destination must both be a vector or neither", &I);
  Assert(SrcBits < DestBits, "Type too small for SExt", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitFPTruncInst(FPTruncInst &I) {
  // Source/destination types and their scalar widths.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DestBits = DestTy->getScalarSizeInBits();

  // fptrunc: FP -> strictly narrower FP, vector-ness preserved.
  Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
  Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
         "fptrunc source and destination must both be a vector or neither", &I);
  Assert(SrcBits > DestBits, "DestTy too big for FPTrunc", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitFPExtInst(FPExtInst &I) {
  // Source/destination types and their scalar widths.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DestBits = DestTy->getScalarSizeInBits();

  // fpext: FP -> strictly wider FP, vector-ness preserved.
  Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
  Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
         "fpext source and destination must both be a vector or neither", &I);
  Assert(SrcBits < DestBits, "DestTy too small for FPExt", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitUIToFPInst(UIToFPInst &I) {
  // Source and destination types.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcIsVec = SrcTy->isVectorTy();
  bool DestIsVec = DestTy->isVectorTy();

  // uitofp: integer (vector) -> FP (vector) of the same shape.
  Assert(SrcIsVec == DestIsVec,
         "UIToFP source and dest must both be vector or scalar", &I);
  Assert(SrcTy->isIntOrIntVectorTy(),
         "UIToFP source must be integer or integer vector", &I);
  Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
         &I);

  if (SrcIsVec && DestIsVec)
    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
               cast<VectorType>(DestTy)->getNumElements(),
           "UIToFP source and dest vector length mismatch", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitSIToFPInst(SIToFPInst &I) {
  // Source and destination types.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcIsVec = SrcTy->isVectorTy();
  bool DestIsVec = DestTy->isVectorTy();

  // sitofp: integer (vector) -> FP (vector) of the same shape.
  Assert(SrcIsVec == DestIsVec,
         "SIToFP source and dest must both be vector or scalar", &I);
  Assert(SrcTy->isIntOrIntVectorTy(),
         "SIToFP source must be integer or integer vector", &I);
  Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
         &I);

  if (SrcIsVec && DestIsVec)
    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
               cast<VectorType>(DestTy)->getNumElements(),
           "SIToFP source and dest vector length mismatch", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitFPToUIInst(FPToUIInst &I) {
  // Source and destination types.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcIsVec = SrcTy->isVectorTy();
  bool DestIsVec = DestTy->isVectorTy();

  // fptoui: FP (vector) -> integer (vector) of the same shape.
  Assert(SrcIsVec == DestIsVec,
         "FPToUI source and dest must both be vector or scalar", &I);
  Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
         &I);
  Assert(DestTy->isIntOrIntVectorTy(),
         "FPToUI result must be integer or integer vector", &I);

  if (SrcIsVec && DestIsVec)
    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
               cast<VectorType>(DestTy)->getNumElements(),
           "FPToUI source and dest vector length mismatch", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitFPToSIInst(FPToSIInst &I) {
  // Source and destination types.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  bool SrcIsVec = SrcTy->isVectorTy();
  bool DestIsVec = DestTy->isVectorTy();

  // fptosi: FP (vector) -> integer (vector) of the same shape.
  Assert(SrcIsVec == DestIsVec,
         "FPToSI source and dest must both be vector or scalar", &I);
  Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
         &I);
  Assert(DestTy->isIntOrIntVectorTy(),
         "FPToSI result must be integer or integer vector", &I);

  if (SrcIsVec && DestIsVec)
    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
               cast<VectorType>(DestTy)->getNumElements(),
           "FPToSI source and dest vector length mismatch", &I);

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
  // Source and destination types.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  // ptrtoint: pointer (vector) -> integer (vector) of the same shape.
  Assert(SrcTy->getScalarType()->isPointerTy(),
         "PtrToInt source must be pointer", &I);
  Assert(DestTy->getScalarType()->isIntegerTy(),
         "PtrToInt result must be integral", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
         &I);

  if (SrcTy->isVectorTy()) {
    // The preceding Asserts early-return on failure, so both sides are
    // known to be vectors here; cast<> is safe.
    auto *VSrc = cast<VectorType>(SrcTy);
    auto *VDest = cast<VectorType>(DestTy);
    Assert(VSrc->getNumElements() == VDest->getNumElements(),
           "PtrToInt Vector width mismatch", &I);
  }

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
  // Source and destination types.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  // inttoptr: integer (vector) -> pointer (vector) of the same shape.
  Assert(SrcTy->getScalarType()->isIntegerTy(),
         "IntToPtr source must be an integral", &I);
  Assert(DestTy->getScalarType()->isPointerTy(),
         "IntToPtr result must be a pointer", &I);
  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
         &I);

  if (SrcTy->isVectorTy()) {
    // The preceding Asserts early-return on failure, so both sides are
    // known to be vectors here; cast<> is safe.
    auto *VSrc = cast<VectorType>(SrcTy);
    auto *VDest = cast<VectorType>(DestTy);
    Assert(VSrc->getNumElements() == VDest->getNumElements(),
           "IntToPtr Vector width mismatch", &I);
  }

  visitInstruction(I);
}
|
|
|
|
|
|
|
|
void Verifier::visitBitCastInst(BitCastInst &I) {
  // CastInst::castIsValid centralizes all bitcast legality rules.
  Assert(
      CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
      "Invalid bitcast", &I);
  visitInstruction(I);
}
|
|
|
|
|
2013-11-15 09:34:59 +08:00
|
|
|
void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
  // Source and destination types.
  Type *SrcTy = I.getOperand(0)->getType();
  Type *DestTy = I.getType();

  Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
         &I);
  Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
         &I);
  // A same-address-space cast would be a no-op; bitcast covers that case.
  Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
         "AddrSpaceCast must be between different address spaces", &I);
  if (SrcTy->isVectorTy())
    Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
           "AddrSpaceCast vector pointer number of elements mismatch", &I);
  visitInstruction(I);
}
|
|
|
|
|
2004-03-02 08:22:19 +08:00
|
|
|
/// visitPHINode - Ensure that a PHI node is well formed.
|
|
|
|
///
|
2002-06-25 23:56:27 +08:00
|
|
|
void Verifier::visitPHINode(PHINode &PN) {
|
|
|
|
// Ensure that the PHI nodes are all grouped together at the top of the block.
|
|
|
|
// This can be tested by checking whether the instruction before this is
|
2003-10-11 01:54:14 +08:00
|
|
|
// either nonexistent (because this is begin()) or is a PHI node. If not,
|
2002-06-25 23:56:27 +08:00
|
|
|
// then there is some other instruction before a PHI.
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(&PN == &PN.getParent()->front() ||
|
|
|
|
isa<PHINode>(--BasicBlock::iterator(&PN)),
|
|
|
|
"PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
|
2002-06-25 23:56:27 +08:00
|
|
|
|
2015-08-14 13:09:07 +08:00
|
|
|
// Check that a PHI doesn't yield a Token.
|
|
|
|
Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
|
|
|
|
|
2009-09-08 09:23:52 +08:00
|
|
|
// Check that all of the values of the PHI node have the same type as the
|
|
|
|
// result, and that the incoming blocks are really basic blocks.
|
2015-05-13 04:05:31 +08:00
|
|
|
for (Value *IncValue : PN.incoming_values()) {
|
|
|
|
Assert(PN.getType() == IncValue->getType(),
|
2015-03-08 05:15:40 +08:00
|
|
|
"PHI node operands are not the same type as the result!", &PN);
|
2009-09-08 09:23:52 +08:00
|
|
|
}
|
2003-11-12 15:13:37 +08:00
|
|
|
|
2003-10-06 01:44:18 +08:00
|
|
|
// All other PHI node constraints are checked in the visitBasicBlock method.
|
2002-04-19 04:37:37 +08:00
|
|
|
|
|
|
|
visitInstruction(PN);
|
|
|
|
}
|
2002-03-16 04:25:09 +08:00
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
void Verifier::verifyCallSite(CallSite CS) {
|
2007-12-22 03:19:01 +08:00
|
|
|
Instruction *I = CS.getInstruction();
|
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(CS.getCalledValue()->getType()->isPointerTy(),
|
|
|
|
"Called function must be a pointer!", I);
|
2011-07-18 12:54:35 +08:00
|
|
|
PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
|
2002-05-09 03:49:50 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(FPTy->getElementType()->isFunctionTy(),
|
|
|
|
"Called function is not pointer to function type!", I);
|
2015-04-24 05:36:23 +08:00
|
|
|
|
|
|
|
Assert(FPTy->getElementType() == CS.getFunctionType(),
|
|
|
|
"Called function is not the same type as the call!", I);
|
|
|
|
|
|
|
|
FunctionType *FTy = CS.getFunctionType();
|
2002-05-09 03:49:50 +08:00
|
|
|
|
|
|
|
// Verify that the correct number of arguments are being passed
|
|
|
|
if (FTy->isVarArg())
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(CS.arg_size() >= FTy->getNumParams(),
|
|
|
|
"Called function requires more parameters than were provided!", I);
|
2002-05-09 03:49:50 +08:00
|
|
|
else
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(CS.arg_size() == FTy->getNumParams(),
|
|
|
|
"Incorrect number of arguments passed to called function!", I);
|
2002-05-09 03:49:50 +08:00
|
|
|
|
2010-05-11 04:58:42 +08:00
|
|
|
// Verify that all arguments to the call match the function type.
|
2002-05-09 03:49:50 +08:00
|
|
|
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
|
|
|
|
"Call parameter type does not match function signature!",
|
|
|
|
CS.getArgument(i), FTy->getParamType(i), I);
|
2007-12-22 03:19:01 +08:00
|
|
|
|
2013-04-19 04:15:25 +08:00
|
|
|
AttributeSet Attrs = CS.getAttributes();
|
2008-01-12 06:36:48 +08:00
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
Assert(verifyAttributeCount(Attrs, CS.arg_size()),
|
2015-03-08 05:15:40 +08:00
|
|
|
"Attribute after last parameter!", I);
|
2008-01-12 06:36:48 +08:00
|
|
|
|
2007-12-22 03:19:01 +08:00
|
|
|
// Verify call attributes.
|
2016-02-01 00:32:23 +08:00
|
|
|
verifyFunctionAttrs(FTy, Attrs, I);
|
2008-01-12 06:36:48 +08:00
|
|
|
|
2014-05-01 01:22:00 +08:00
|
|
|
// Conservatively check the inalloca argument.
|
|
|
|
// We have a bug if we can find that there is an underlying alloca without
|
|
|
|
// inalloca.
|
|
|
|
if (CS.hasInAllocaArgument()) {
|
|
|
|
Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
|
|
|
|
if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(AI->isUsedWithInAlloca(),
|
|
|
|
"inalloca argument for call has mismatched alloca", AI, I);
|
2014-05-01 01:22:00 +08:00
|
|
|
}
|
|
|
|
|
2016-04-02 05:41:15 +08:00
|
|
|
// For each argument of the callsite, if it has the swifterror argument,
|
|
|
|
// make sure the underlying alloca has swifterror as well.
|
|
|
|
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
|
|
|
|
if (CS.paramHasAttr(i+1, Attribute::SwiftError)) {
|
|
|
|
Value *SwiftErrorArg = CS.getArgument(i);
|
|
|
|
auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets());
|
|
|
|
Assert(AI, "swifterror argument should come from alloca", AI, I);
|
|
|
|
if (AI)
|
|
|
|
Assert(AI->isSwiftError(),
|
|
|
|
"swifterror argument for call has mismatched alloca", AI, I);
|
|
|
|
}
|
|
|
|
|
2013-04-20 13:14:40 +08:00
|
|
|
if (FTy->isVarArg()) {
|
|
|
|
// FIXME? is 'nest' even legal here?
|
|
|
|
bool SawNest = false;
|
|
|
|
bool SawReturned = false;
|
|
|
|
|
|
|
|
for (unsigned Idx = 1; Idx < 1 + FTy->getNumParams(); ++Idx) {
|
|
|
|
if (Attrs.hasAttribute(Idx, Attribute::Nest))
|
|
|
|
SawNest = true;
|
|
|
|
if (Attrs.hasAttribute(Idx, Attribute::Returned))
|
|
|
|
SawReturned = true;
|
|
|
|
}
|
|
|
|
|
2008-01-12 06:36:48 +08:00
|
|
|
// Check attributes on the varargs part.
|
|
|
|
for (unsigned Idx = 1 + FTy->getNumParams(); Idx <= CS.arg_size(); ++Idx) {
|
2013-07-21 01:46:00 +08:00
|
|
|
Type *Ty = CS.getArgument(Idx-1)->getType();
|
2016-02-01 00:32:23 +08:00
|
|
|
verifyParameterAttrs(Attrs, Idx, Ty, false, I);
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2013-04-20 13:14:40 +08:00
|
|
|
if (Attrs.hasAttribute(Idx, Attribute::Nest)) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(!SawNest, "More than one parameter has attribute nest!", I);
|
2013-04-20 13:14:40 +08:00
|
|
|
SawNest = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Attrs.hasAttribute(Idx, Attribute::Returned)) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(!SawReturned, "More than one parameter has attribute returned!",
|
|
|
|
I);
|
|
|
|
Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
|
|
|
|
"Incompatible argument and return types for 'returned' "
|
|
|
|
"attribute",
|
|
|
|
I);
|
2013-04-20 13:14:40 +08:00
|
|
|
SawReturned = true;
|
|
|
|
}
|
2008-01-13 00:42:01 +08:00
|
|
|
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(!Attrs.hasAttribute(Idx, Attribute::StructRet),
|
|
|
|
"Attribute 'sret' cannot be used for vararg call arguments!", I);
|
2014-01-17 06:59:24 +08:00
|
|
|
|
|
|
|
if (Attrs.hasAttribute(Idx, Attribute::InAlloca))
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(Idx == CS.arg_size(), "inalloca isn't on the last argument!", I);
|
2008-01-12 06:36:48 +08:00
|
|
|
}
|
2013-04-20 13:14:40 +08:00
|
|
|
}
|
2007-12-22 03:19:01 +08:00
|
|
|
|
2009-05-30 13:06:04 +08:00
|
|
|
// Verify that there's no metadata unless it's a direct call to an intrinsic.
|
2014-04-09 14:08:46 +08:00
|
|
|
if (CS.getCalledFunction() == nullptr ||
|
2010-05-11 04:58:42 +08:00
|
|
|
!CS.getCalledFunction()->getName().startswith("llvm.")) {
|
2015-08-14 13:09:07 +08:00
|
|
|
for (Type *ParamTy : FTy->params()) {
|
|
|
|
Assert(!ParamTy->isMetadataTy(),
|
2015-03-08 05:15:40 +08:00
|
|
|
"Function has metadata parameter but isn't an intrinsic", I);
|
2015-08-14 13:09:07 +08:00
|
|
|
Assert(!ParamTy->isTokenTy(),
|
|
|
|
"Function has token parameter but isn't an intrinsic", I);
|
|
|
|
}
|
2009-05-30 13:06:04 +08:00
|
|
|
}
|
|
|
|
|
2015-08-14 13:09:07 +08:00
|
|
|
// Verify that indirect calls don't return tokens.
|
|
|
|
if (CS.getCalledFunction() == nullptr)
|
|
|
|
Assert(!FTy->getReturnType()->isTokenTy(),
|
|
|
|
"Return type cannot be token for indirect call!");
|
|
|
|
|
2015-06-27 05:39:44 +08:00
|
|
|
if (Function *F = CS.getCalledFunction())
|
|
|
|
if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
|
2015-06-27 06:21:52 +08:00
|
|
|
visitIntrinsicCallSite(ID, CS);
|
2015-06-27 05:39:44 +08:00
|
|
|
|
2016-01-21 03:50:25 +08:00
|
|
|
// Verify that a callsite has at most one "deopt", at most one "funclet" and
|
|
|
|
// at most one "gc-transition" operand bundle.
|
|
|
|
bool FoundDeoptBundle = false, FoundFuncletBundle = false,
|
|
|
|
FoundGCTransitionBundle = false;
|
2015-11-12 05:38:02 +08:00
|
|
|
for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
|
2015-12-16 05:27:27 +08:00
|
|
|
OperandBundleUse BU = CS.getOperandBundleAt(i);
|
|
|
|
uint32_t Tag = BU.getTagID();
|
|
|
|
if (Tag == LLVMContext::OB_deopt) {
|
2015-11-12 05:38:02 +08:00
|
|
|
Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
|
|
|
|
FoundDeoptBundle = true;
|
2016-01-21 03:50:25 +08:00
|
|
|
} else if (Tag == LLVMContext::OB_gc_transition) {
|
|
|
|
Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
|
|
|
|
I);
|
|
|
|
FoundGCTransitionBundle = true;
|
|
|
|
} else if (Tag == LLVMContext::OB_funclet) {
|
2015-12-16 05:27:27 +08:00
|
|
|
Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
|
|
|
|
FoundFuncletBundle = true;
|
|
|
|
Assert(BU.Inputs.size() == 1,
|
|
|
|
"Expected exactly one funclet bundle operand", I);
|
|
|
|
Assert(isa<FuncletPadInst>(BU.Inputs.front()),
|
|
|
|
"Funclet bundle operands should correspond to a FuncletPadInst",
|
|
|
|
I);
|
|
|
|
}
|
2015-11-12 05:38:02 +08:00
|
|
|
}
|
|
|
|
|
2016-04-25 06:23:13 +08:00
|
|
|
// Verify that each inlinable callsite of a debug-info-bearing function in a
|
|
|
|
// debug-info-bearing function has a debug location attached to it. Failure to
|
|
|
|
// do so causes assertion failures when the inliner sets up inline scope info.
|
|
|
|
if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
|
|
|
|
CS.getCalledFunction()->getSubprogram())
|
|
|
|
Assert(I->getDebugLoc(), "inlinable function call in a function with debug "
|
|
|
|
"info must have a !dbg location",
|
|
|
|
I);
|
|
|
|
|
2007-12-22 03:19:01 +08:00
|
|
|
visitInstruction(*I);
|
|
|
|
}
|
|
|
|
|
2014-04-25 04:14:34 +08:00
|
|
|
/// Two types are "congruent" if they are identical, or if they are both pointer
|
|
|
|
/// types with different pointee types and the same address space.
|
|
|
|
static bool isTypeCongruent(Type *L, Type *R) {
|
|
|
|
if (L == R)
|
|
|
|
return true;
|
|
|
|
PointerType *PL = dyn_cast<PointerType>(L);
|
|
|
|
PointerType *PR = dyn_cast<PointerType>(R);
|
|
|
|
if (!PL || !PR)
|
|
|
|
return false;
|
|
|
|
return PL->getAddressSpace() == PR->getAddressSpace();
|
|
|
|
}
|
|
|
|
|
2014-05-16 07:58:57 +08:00
|
|
|
static AttrBuilder getParameterABIAttributes(int I, AttributeSet Attrs) {
  // Attributes that change how a parameter is passed at the ABI level.
  // Attribute indices are 1-based, hence the I + 1 below.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
      Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
      Attribute::SwiftError};
  AttrBuilder Copy;
  for (auto AK : ABIAttrs)
    if (Attrs.hasAttribute(I + 1, AK))
      Copy.addAttribute(AK);
  // Alignment is ABI-impacting too; carry its value along.
  if (Attrs.hasAttribute(I + 1, Attribute::Alignment))
    Copy.addAlignmentAttr(Attrs.getParamAlignment(I + 1));
  return Copy;
}
|
|
|
|
|
2014-04-25 04:14:34 +08:00
|
|
|
void Verifier::verifyMustTailCall(CallInst &CI) {
  Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);

  // - The caller and callee prototypes must match. Pointer types of
  //   parameters or return types may differ in pointee type, but not
  //   address space.
  Function *Caller = CI.getParent()->getParent();
  FunctionType *CallerTy = Caller->getFunctionType();
  FunctionType *CalleeTy = CI.getFunctionType();
  Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
         "cannot guarantee tail call due to mismatched parameter counts", &CI);
  Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
         "cannot guarantee tail call due to mismatched varargs", &CI);
  Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
         "cannot guarantee tail call due to mismatched return types", &CI);
  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I)
    Assert(
        isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
        "cannot guarantee tail call due to mismatched parameter types", &CI);

  // - The calling conventions of the caller and callee must match.
  Assert(Caller->getCallingConv() == CI.getCallingConv(),
         "cannot guarantee tail call due to mismatched calling conv", &CI);

  // - All ABI-impacting function attributes, such as sret, byval, inreg,
  //   returned, and inalloca, must match.
  AttributeSet CallerAttrs = Caller->getAttributes();
  AttributeSet CalleeAttrs = CI.getAttributes();
  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
    Assert(CallerABIAttrs == CalleeABIAttrs,
           "cannot guarantee tail call due to mismatched ABI impacting "
           "function attributes",
           &CI, CI.getOperand(I));
  }

  // - The call must immediately precede a ret instruction, or a pointer
  //   bitcast followed by a ret instruction.
  // - The ret instruction must return the (possibly bitcasted) value
  //   produced by the call or void.
  Value *RetVal = &CI;
  Instruction *Next = CI.getNextNode();

  // Handle the optional bitcast.
  if (auto *BI = dyn_cast_or_null<BitCastInst>(Next)) {
    Assert(BI->getOperand(0) == RetVal,
           "bitcast following musttail call must use the call", BI);
    RetVal = BI;
    Next = BI->getNextNode();
  }

  // Check the return.
  auto *Ret = dyn_cast_or_null<ReturnInst>(Next);
  Assert(Ret, "musttail call must be precede a ret with an optional bitcast",
         &CI);
  Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
         "musttail call result must be returned", Ret);
}
|
|
|
|
|
2007-12-22 03:19:01 +08:00
|
|
|
void Verifier::visitCallInst(CallInst &CI) {
  verifyCallSite(&CI);

  // musttail imposes extra structural constraints on the call site.
  if (CI.isMustTailCall())
    verifyMustTailCall(CI);
}
|
|
|
|
|
|
|
|
void Verifier::visitInvokeInst(InvokeInst &II) {
  verifyCallSite(&II);

  // Verify that the first non-PHI instruction of the unwind destination is an
  // exception handling instruction.
  Assert(
      II.getUnwindDest()->isEHPad(),
      "The unwind destination does not have an exception handling instruction!",
      &II);

  visitTerminatorInst(II);
}
|
2002-04-19 04:37:37 +08:00
|
|
|
|
2004-03-02 08:22:19 +08:00
|
|
|
/// visitBinaryOperator - Check that both arguments to the binary operator are
|
|
|
|
/// of the same type!
|
|
|
|
///
|
2002-06-25 23:56:27 +08:00
|
|
|
void Verifier::visitBinaryOperator(BinaryOperator &B) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
|
|
|
|
"Both operands to a binary operator are not of the same type!", &B);
|
2002-09-10 04:26:04 +08:00
|
|
|
|
2007-02-02 10:16:23 +08:00
|
|
|
switch (B.getOpcode()) {
|
2009-06-06 00:10:00 +08:00
|
|
|
// Check that integer arithmetic operators are only used with
|
|
|
|
// integral operands.
|
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::Mul:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SRem:
|
|
|
|
case Instruction::URem:
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(B.getType()->isIntOrIntVectorTy(),
|
|
|
|
"Integer arithmetic operators only work with integral types!", &B);
|
|
|
|
Assert(B.getType() == B.getOperand(0)->getType(),
|
|
|
|
"Integer arithmetic operators must have same type "
|
|
|
|
"for operands and result!",
|
|
|
|
&B);
|
2009-06-06 00:10:00 +08:00
|
|
|
break;
|
|
|
|
// Check that floating-point arithmetic operators are only used with
|
|
|
|
// floating-point operands.
|
|
|
|
case Instruction::FAdd:
|
|
|
|
case Instruction::FSub:
|
|
|
|
case Instruction::FMul:
|
|
|
|
case Instruction::FDiv:
|
|
|
|
case Instruction::FRem:
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(B.getType()->isFPOrFPVectorTy(),
|
|
|
|
"Floating-point arithmetic operators only work with "
|
|
|
|
"floating-point types!",
|
|
|
|
&B);
|
|
|
|
Assert(B.getType() == B.getOperand(0)->getType(),
|
|
|
|
"Floating-point arithmetic operators must have same type "
|
|
|
|
"for operands and result!",
|
|
|
|
&B);
|
2009-06-06 00:10:00 +08:00
|
|
|
break;
|
2002-09-10 04:26:04 +08:00
|
|
|
// Check that logical operators are only used with integral operands.
|
2007-02-02 10:16:23 +08:00
|
|
|
case Instruction::And:
|
|
|
|
case Instruction::Or:
|
|
|
|
case Instruction::Xor:
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(B.getType()->isIntOrIntVectorTy(),
|
|
|
|
"Logical operators only work with integral types!", &B);
|
|
|
|
Assert(B.getType() == B.getOperand(0)->getType(),
|
|
|
|
"Logical operators must have same type for operands and result!",
|
|
|
|
&B);
|
2007-02-02 10:16:23 +08:00
|
|
|
break;
|
|
|
|
case Instruction::Shl:
|
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::AShr:
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(B.getType()->isIntOrIntVectorTy(),
|
|
|
|
"Shifts only work with integral types!", &B);
|
|
|
|
Assert(B.getType() == B.getOperand(0)->getType(),
|
|
|
|
"Shift return type must be same as operands!", &B);
|
2007-02-02 10:16:23 +08:00
|
|
|
break;
|
2009-06-06 00:10:00 +08:00
|
|
|
default:
|
2009-07-15 00:55:14 +08:00
|
|
|
llvm_unreachable("Unknown BinaryOperator opcode!");
|
2002-09-10 04:26:04 +08:00
|
|
|
}
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2002-04-19 04:37:37 +08:00
|
|
|
visitInstruction(B);
|
2002-02-21 01:55:43 +08:00
|
|
|
}
|
|
|
|
|
2010-08-23 07:45:14 +08:00
|
|
|
/// Verify an icmp: its operands share one type, that type is integer or
/// pointer (scalar or vector), and the predicate is an integer predicate.
void Verifier::visitICmpInst(ICmpInst &IC) {
  Type *LHSTy = IC.getOperand(0)->getType();
  Type *RHSTy = IC.getOperand(1)->getType();

  // Both operands must have the same type.
  Assert(LHSTy == RHSTy,
         "Both operands to ICmp instruction are not of the same type!", &IC);
  // icmp only compares integers and pointers (possibly vectors thereof).
  Assert(LHSTy->isIntOrIntVectorTy() || LHSTy->getScalarType()->isPointerTy(),
         "Invalid operand types for ICmp instruction", &IC);
  // The predicate must lie in the integer-comparison predicate range.
  Assert(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
             IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE,
         "Invalid predicate in ICmp instruction!", &IC);

  visitInstruction(IC);
}
|
|
|
|
|
2010-08-23 07:45:14 +08:00
|
|
|
/// Verify an fcmp: its operands share one type, that type is floating point
/// (scalar or vector), and the predicate is a floating-point predicate.
void Verifier::visitFCmpInst(FCmpInst &FC) {
  Type *LHSTy = FC.getOperand(0)->getType();
  Type *RHSTy = FC.getOperand(1)->getType();

  // Both operands must have the same type.
  Assert(LHSTy == RHSTy,
         "Both operands to FCmp instruction are not of the same type!", &FC);
  // fcmp only compares floating-point values (possibly vectors thereof).
  Assert(LHSTy->isFPOrFPVectorTy(),
         "Invalid operand types for FCmp instruction", &FC);
  // The predicate must lie in the floating-point predicate range.
  Assert(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE &&
             FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE,
         "Invalid predicate in FCmp instruction!", &FC);

  visitInstruction(FC);
}
|
|
|
|
|
2006-01-11 03:05:34 +08:00
|
|
|
/// Verify an extractelement; the detailed operand checking is delegated to
/// the instruction class itself.
void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
  Value *Vec = EI.getOperand(0);
  Value *Index = EI.getOperand(1);
  Assert(ExtractElementInst::isValidOperands(Vec, Index),
         "Invalid extractelement operands!", &EI);
  visitInstruction(EI);
}
|
|
|
|
|
2006-01-18 04:07:22 +08:00
|
|
|
/// Verify an insertelement; the detailed operand checking is delegated to
/// the instruction class itself.
void Verifier::visitInsertElementInst(InsertElementInst &IE) {
  Value *Vec = IE.getOperand(0);
  Value *Elt = IE.getOperand(1);
  Value *Index = IE.getOperand(2);
  Assert(InsertElementInst::isValidOperands(Vec, Elt, Index),
         "Invalid insertelement operands!", &IE);
  visitInstruction(IE);
}
|
|
|
|
|
2006-04-08 09:18:18 +08:00
|
|
|
/// Verify a shufflevector; the detailed operand checking is delegated to
/// the instruction class itself.
void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
  Value *LHS = SV.getOperand(0);
  Value *RHS = SV.getOperand(1);
  Value *Mask = SV.getOperand(2);
  Assert(ShuffleVectorInst::isValidOperands(LHS, RHS, Mask),
         "Invalid shufflevector operands!", &SV);
  visitInstruction(SV);
}
|
|
|
|
|
2002-06-25 23:56:27 +08:00
|
|
|
/// Verify a getelementptr: the base is a pointer (or vector of pointers), the
/// source element type is sized, the indices are valid for that type, the
/// result type agrees with what the indices compute, and — for vector GEPs —
/// all vector operand widths are consistent.
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  // For a vector-of-pointers base, check the scalar element type.
  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();

  Assert(isa<PointerType>(TargetTy),
         "GEP base pointer is not a vector or a vector of pointers", &GEP);
  Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);

  // Walk the indices to compute the type the GEP should produce.
  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
  Type *ElTy =
      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
  Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);

  // The declared result type must match the indexed type.
  Assert(GEP.getType()->getScalarType()->isPointerTy() &&
             GEP.getResultElementType() == ElTy,
         "GEP is not of right type for indices!", &GEP, ElTy);

  if (GEP.getType()->isVectorTy()) {
    // Additional checks for vector GEPs: every vector operand must have the
    // same number of elements as the result.
    unsigned GEPWidth = GEP.getType()->getVectorNumElements();
    if (GEP.getPointerOperandType()->isVectorTy())
      Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
             "Vector GEP result width doesn't match operand's", &GEP);
    for (Value *Idx : Idxs) {
      Type *IndexTy = Idx->getType();
      if (IndexTy->isVectorTy()) {
        unsigned IndexWidth = IndexTy->getVectorNumElements();
        Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
      }
      // Pass &GEP so a failure here reports the offending instruction, as
      // every other Assert in this function does.
      Assert(IndexTy->getScalarType()->isIntegerTy(),
             "All GEP indices should be of integer type", &GEP);
    }
  }
  visitInstruction(GEP);
}
|
|
|
|
|
2012-06-01 00:04:26 +08:00
|
|
|
static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
|
|
|
|
return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
|
|
|
|
}
|
|
|
|
|
2014-10-21 07:52:07 +08:00
|
|
|
void Verifier::visitRangeMetadata(Instruction& I,
|
|
|
|
MDNode* Range, Type* Ty) {
|
|
|
|
assert(Range &&
|
|
|
|
Range == I.getMetadata(LLVMContext::MD_range) &&
|
|
|
|
"precondition violation");
|
|
|
|
|
|
|
|
unsigned NumOperands = Range->getNumOperands();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
|
2014-10-21 07:52:07 +08:00
|
|
|
unsigned NumRanges = NumOperands / 2;
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(NumRanges >= 1, "It should have at least one range!", Range);
|
|
|
|
|
2014-10-21 07:52:07 +08:00
|
|
|
ConstantRange LastRange(1); // Dummy initial value
|
|
|
|
for (unsigned i = 0; i < NumRanges; ++i) {
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
ConstantInt *Low =
|
|
|
|
mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(Low, "The lower limit must be an integer!", Low);
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
ConstantInt *High =
|
|
|
|
mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(High, "The upper limit must be an integer!", High);
|
|
|
|
Assert(High->getType() == Low->getType() && High->getType() == Ty,
|
|
|
|
"Range types must match instruction type!", &I);
|
|
|
|
|
2014-10-21 07:52:07 +08:00
|
|
|
APInt HighV = High->getValue();
|
|
|
|
APInt LowV = Low->getValue();
|
|
|
|
ConstantRange CurRange(LowV, HighV);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
|
|
|
|
"Range must not be empty!", Range);
|
2014-10-21 07:52:07 +08:00
|
|
|
if (i != 0) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(CurRange.intersectWith(LastRange).isEmptySet(),
|
|
|
|
"Intervals are overlapping", Range);
|
|
|
|
Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
|
|
|
|
Range);
|
|
|
|
Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
|
|
|
|
Range);
|
2014-10-21 07:52:07 +08:00
|
|
|
}
|
|
|
|
LastRange = ConstantRange(LowV, HighV);
|
|
|
|
}
|
|
|
|
if (NumRanges > 2) {
|
|
|
|
APInt FirstLow =
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
|
2014-10-21 07:52:07 +08:00
|
|
|
APInt FirstHigh =
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
|
2014-10-21 07:52:07 +08:00
|
|
|
ConstantRange FirstRange(FirstLow, FirstHigh);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
|
|
|
|
"Intervals are overlapping", Range);
|
|
|
|
Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
|
|
|
|
Range);
|
2014-10-21 07:52:07 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-18 06:09:19 +08:00
|
|
|
void Verifier::checkAtomicMemAccessSize(const Module *M, Type *Ty,
|
|
|
|
const Instruction *I) {
|
|
|
|
unsigned Size = M->getDataLayout().getTypeSizeInBits(Ty);
|
|
|
|
Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
|
|
|
|
Assert(!(Size & (Size - 1)),
|
|
|
|
"atomic memory access' operand must have a power-of-two size", Ty, I);
|
|
|
|
}
|
|
|
|
|
2002-06-25 23:56:27 +08:00
|
|
|
/// Verify a load: pointer operand, legal alignment, and — when atomic — a
/// legal ordering, explicit alignment, and a byte-sized power-of-two
/// integer/pointer/FP result type.
void Verifier::visitLoadInst(LoadInst &LI) {
  PointerType *PtrTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
  Assert(PtrTy, "Load operand must be a pointer.", &LI);
  Type *ValTy = LI.getType();
  Assert(LI.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &LI);
  if (LI.isAtomic()) {
    // Release semantics make no sense on a load.
    Assert(LI.getOrdering() != AtomicOrdering::Release &&
               LI.getOrdering() != AtomicOrdering::AcquireRelease,
           "Load cannot have Release ordering", &LI);
    Assert(LI.getAlignment() != 0,
           "Atomic load must specify explicit alignment", &LI);
    Assert(ValTy->isIntegerTy() || ValTy->isPointerTy() ||
               ValTy->isFloatingPointTy(),
           "atomic load operand must have integer, pointer, or floating point "
           "type!",
           ValTy, &LI);
    checkAtomicMemAccessSize(M, ValTy, &LI);
  } else {
    // A synchronization scope is only meaningful on atomic accesses.
    Assert(LI.getSynchScope() == CrossThread,
           "Non-atomic load cannot have SynchronizationScope specified", &LI);
  }

  visitInstruction(LI);
}
|
|
|
|
|
2002-06-25 23:56:27 +08:00
|
|
|
/// Verify a store: pointer destination whose pointee matches the stored value
/// type, legal alignment, and — when atomic — a legal ordering, explicit
/// alignment, and a byte-sized power-of-two integer/pointer/FP value type.
void Verifier::visitStoreInst(StoreInst &SI) {
  PointerType *PtrTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
  Assert(PtrTy, "Store operand must be a pointer.", &SI);
  Type *ValTy = PtrTy->getElementType();
  Assert(ValTy == SI.getOperand(0)->getType(),
         "Stored value type does not match pointer operand type!", &SI, ValTy);
  Assert(SI.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &SI);
  if (SI.isAtomic()) {
    // Acquire semantics make no sense on a store.
    Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
               SI.getOrdering() != AtomicOrdering::AcquireRelease,
           "Store cannot have Acquire ordering", &SI);
    Assert(SI.getAlignment() != 0,
           "Atomic store must specify explicit alignment", &SI);
    Assert(ValTy->isIntegerTy() || ValTy->isPointerTy() ||
               ValTy->isFloatingPointTy(),
           "atomic store operand must have integer, pointer, or floating point "
           "type!",
           ValTy, &SI);
    checkAtomicMemAccessSize(M, ValTy, &SI);
  } else {
    // A synchronization scope is only meaningful on atomic accesses.
    Assert(SI.getSynchScope() == CrossThread,
           "Non-atomic store cannot have SynchronizationScope specified", &SI);
  }
  visitInstruction(SI);
}
|
|
|
|
|
2016-04-02 05:41:15 +08:00
|
|
|
/// Check that SwiftErrorVal is used as a swifterror argument in CS.
|
|
|
|
void Verifier::verifySwiftErrorCallSite(CallSite CS,
                                        const Value *SwiftErrorVal) {
  // Walk every argument of the call site; whenever the swifterror value is
  // passed, the corresponding parameter must be marked swifterror.
  unsigned ArgNo = 0;
  for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE;
       ++AI, ++ArgNo) {
    if (*AI != SwiftErrorVal)
      continue;
    // Attribute indices are 1-based; index 0 refers to the return value.
    Assert(CS.paramHasAttr(ArgNo + 1, Attribute::SwiftError),
           "swifterror value when used in a callsite should be marked "
           "with swifterror attribute",
           SwiftErrorVal, CS);
  }
}
|
|
|
|
|
|
|
|
void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
|
|
|
|
// Check that swifterror value is only used by loads, stores, or as
|
|
|
|
// a swifterror argument.
|
|
|
|
for (const User *U : SwiftErrorVal->users()) {
|
|
|
|
Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
|
|
|
|
isa<InvokeInst>(U),
|
|
|
|
"swifterror value can only be loaded and stored from, or "
|
|
|
|
"as a swifterror argument!",
|
|
|
|
SwiftErrorVal, U);
|
|
|
|
// If it is used by a store, check it is the second operand.
|
|
|
|
if (auto StoreI = dyn_cast<StoreInst>(U))
|
|
|
|
Assert(StoreI->getOperand(1) == SwiftErrorVal,
|
|
|
|
"swifterror value should be the second operand when used "
|
|
|
|
"by stores", SwiftErrorVal, U);
|
|
|
|
if (auto CallI = dyn_cast<CallInst>(U))
|
|
|
|
verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal);
|
|
|
|
if (auto II = dyn_cast<InvokeInst>(U))
|
|
|
|
verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-10-24 05:09:37 +08:00
|
|
|
void Verifier::visitAllocaInst(AllocaInst &AI) {
  // Scratch set handed to isSized() so recursive struct types are detected
  // instead of looping forever.
  SmallPtrSet<Type *, 4> SizednessVisited;
  PointerType *ResultPtrTy = AI.getType();
  Assert(ResultPtrTy->getAddressSpace() == 0,
         "Allocation instruction pointer not in the generic address space!",
         &AI);
  Assert(AI.getAllocatedType()->isSized(&SizednessVisited),
         "Cannot allocate unsized type", &AI);
  Assert(AI.getArraySize()->getType()->isIntegerTy(),
         "Alloca array size must have integer type", &AI);
  Assert(AI.getAlignment() <= Value::MaximumAlignment,
         "huge alignment values are unsupported", &AI);

  // swifterror allocas carry extra use restrictions; verify them.
  if (AI.isSwiftError())
    verifySwiftErrorValue(&AI);

  visitInstruction(AI);
}
|
|
|
|
|
2011-07-29 05:48:00 +08:00
|
|
|
void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
  // FIXME: more conditions???
  // Both orderings must be genuinely atomic and at least monotonic, and the
  // failure ordering may be neither stronger than the success ordering nor
  // carry release semantics.
  AtomicOrdering SuccessOrd = CXI.getSuccessOrdering();
  AtomicOrdering FailureOrd = CXI.getFailureOrdering();
  Assert(SuccessOrd != AtomicOrdering::NotAtomic,
         "cmpxchg instructions must be atomic.", &CXI);
  Assert(FailureOrd != AtomicOrdering::NotAtomic,
         "cmpxchg instructions must be atomic.", &CXI);
  Assert(SuccessOrd != AtomicOrdering::Unordered,
         "cmpxchg instructions cannot be unordered.", &CXI);
  Assert(FailureOrd != AtomicOrdering::Unordered,
         "cmpxchg instructions cannot be unordered.", &CXI);
  Assert(!isStrongerThan(FailureOrd, SuccessOrd),
         "cmpxchg instructions failure argument shall be no stronger than the "
         "success argument",
         &CXI);
  Assert(FailureOrd != AtomicOrdering::Release &&
             FailureOrd != AtomicOrdering::AcquireRelease,
         "cmpxchg failure ordering cannot include release semantics", &CXI);

  // Operand 0 is the address; operands 1 and 2 (expected and new values)
  // must both match its pointee type, which must be integer or pointer of
  // a legal atomic width.
  PointerType *AddrTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
  Assert(AddrTy, "First cmpxchg operand must be a pointer.", &CXI);
  Type *ElemTy = AddrTy->getElementType();
  Assert(ElemTy->isIntegerTy() || ElemTy->isPointerTy(),
         "cmpxchg operand must have integer or pointer type",
         ElemTy, &CXI);
  checkAtomicMemAccessSize(M, ElemTy, &CXI);
  Assert(ElemTy == CXI.getOperand(1)->getType(),
         "Expected value type does not match pointer operand type!", &CXI,
         ElemTy);
  Assert(ElemTy == CXI.getOperand(2)->getType(),
         "Stored value type does not match pointer operand type!", &CXI,
         ElemTy);
  visitInstruction(CXI);
}
|
|
|
|
|
|
|
|
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
  // atomicrmw must carry a real atomic ordering (monotonic or stronger).
  Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
         "atomicrmw instructions must be atomic.", &RMWI);
  Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
         "atomicrmw instructions cannot be unordered.", &RMWI);
  // Operand 0 is the address; the value operand must match its pointee
  // type, which must be an integer of a legal atomic width.
  PointerType *AddrTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
  Assert(AddrTy, "First atomicrmw operand must be a pointer.", &RMWI);
  Type *ElemTy = AddrTy->getElementType();
  Assert(ElemTy->isIntegerTy(), "atomicrmw operand must have integer type!",
         &RMWI, ElemTy);
  checkAtomicMemAccessSize(M, ElemTy, &RMWI);
  Assert(ElemTy == RMWI.getOperand(1)->getType(),
         "Argument value type does not match pointer operand type!", &RMWI,
         ElemTy);
  // The opcode must be one of the defined read-modify-write operations.
  Assert(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
             RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
         "Invalid binary operation!", &RMWI);
  visitInstruction(RMWI);
}
|
|
|
|
|
2011-07-26 07:16:38 +08:00
|
|
|
void Verifier::visitFenceInst(FenceInst &FI) {
  // A fence only makes sense with one of the four orderings below;
  // not-atomic, unordered and monotonic fences are all rejected.
  const AtomicOrdering Ordering = FI.getOrdering();
  bool IsValidFenceOrdering =
      Ordering == AtomicOrdering::Acquire ||
      Ordering == AtomicOrdering::Release ||
      Ordering == AtomicOrdering::AcquireRelease ||
      Ordering == AtomicOrdering::SequentiallyConsistent;
  Assert(IsValidFenceOrdering,
         "fence instructions may only have acquire, release, acq_rel, or "
         "seq_cst ordering.",
         &FI);
  visitInstruction(FI);
}
|
|
|
|
|
2008-07-23 08:34:11 +08:00
|
|
|
void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
  // The result type must be exactly what indexing the aggregate operand
  // with the instruction's index list yields.
  Type *IndexedTy = ExtractValueInst::getIndexedType(
      EVI.getAggregateOperand()->getType(), EVI.getIndices());
  Assert(IndexedTy == EVI.getType(), "Invalid ExtractValueInst operands!",
         &EVI);

  visitInstruction(EVI);
}
|
|
|
|
|
2008-07-23 08:34:11 +08:00
|
|
|
void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
  // The inserted value (operand 1) must have exactly the type that indexing
  // the aggregate operand with the instruction's index list yields.
  Type *IndexedTy = ExtractValueInst::getIndexedType(
      IVI.getAggregateOperand()->getType(), IVI.getIndices());
  Assert(IndexedTy == IVI.getOperand(1)->getType(),
         "Invalid InsertValueInst operands!", &IVI);

  visitInstruction(IVI);
}
|
2002-04-19 04:37:37 +08:00
|
|
|
|
2016-01-10 12:28:38 +08:00
|
|
|
/// Return the parent pad of an EH pad instruction: funclet pads carry it
/// directly; anything else handled here must be a catchswitch.
static Value *getParentPad(Value *EHPad) {
  auto *FPI = dyn_cast<FuncletPadInst>(EHPad);
  return FPI ? FPI->getParentPad()
             : cast<CatchSwitchInst>(EHPad)->getParentPad();
}
|
|
|
|
|
2015-08-11 10:48:30 +08:00
|
|
|
/// Verify that every predecessor of the block holding EH pad \p I reaches it
/// through a terminator that is legally allowed to unwind to it, and that the
/// implied funclet-exit path is well formed.
void Verifier::visitEHPadPredecessors(Instruction &I) {
  assert(I.isEHPad());

  BasicBlock *BB = I.getParent();
  Function *F = BB->getParent();

  Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);

  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    // The landingpad instruction defines its parent as a landing pad block.
    // The landing pad block may be branched to only by the unwind edge of an
    // invoke.
    for (BasicBlock *PredBB : predecessors(BB)) {
      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
      Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
             "Block containing LandingPadInst must be jumped to "
             "only by the unwind edge of an invoke.",
             LPI);
    }
    return;
  }
  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
    // A catchpad may only be entered from its owning catchswitch, and that
    // catchswitch must not also name this block as its unwind destination.
    // NOTE: diagnostic previously read "Block containg"; typo fixed.
    if (!pred_empty(BB))
      Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
             "Block containing CatchPadInst must be jumped to "
             "only by its catchswitch.",
             CPI);
    Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
           "Catchswitch cannot unwind to one of its catchpads",
           CPI->getCatchSwitch(), CPI);
    return;
  }

  // Verify that each pred has a legal terminator with a legal to/from EH
  // pad relationship.
  Instruction *ToPad = &I;
  Value *ToPadParent = getParentPad(ToPad);
  for (BasicBlock *PredBB : predecessors(BB)) {
    TerminatorInst *TI = PredBB->getTerminator();
    // Determine which pad (or none) the unwind edge originates from.
    Value *FromPad;
    if (auto *II = dyn_cast<InvokeInst>(TI)) {
      Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
             "EH pad must be jumped to via an unwind edge", ToPad, II);
      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
        FromPad = Bundle->Inputs[0];
      else
        FromPad = ConstantTokenNone::get(II->getContext());
    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
      FromPad = CRI->getOperand(0);
      Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup",
             CRI);
    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
      FromPad = CSI;
    } else {
      Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
    }

    // The edge may exit from zero or more nested pads. Walk up the parent-pad
    // chain until we reach the destination pad's parent; anything else along
    // the way (the destination itself, a "none" token, or a cycle) is illegal.
    SmallSet<Value *, 8> Seen;
    for (;; FromPad = getParentPad(FromPad)) {
      Assert(FromPad != ToPad,
             "EH pad cannot handle exceptions raised within it", FromPad, TI);
      if (FromPad == ToPadParent) {
        // This is a legal unwind edge.
        break;
      }
      Assert(!isa<ConstantTokenNone>(FromPad),
             "A single unwind edge may only enter one EH pad", TI);
      Assert(Seen.insert(FromPad).second,
             "EH pad jumps through a cycle of pads", FromPad);
    }
  }
}
|
2011-08-13 04:24:12 +08:00
|
|
|
|
2015-08-11 10:48:30 +08:00
|
|
|
void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
  // The landingpad instruction is ill-formed if it doesn't have any clauses and
  // isn't a cleanup.
  Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
         "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);

  visitEHPadPredecessors(LPI);

  // All landingpads in a function must agree on one result type; the first
  // one seen establishes it.
  if (!LandingPadResultTy)
    LandingPadResultTy = LPI.getType();
  else
    Assert(LandingPadResultTy == LPI.getType(),
           "The landingpad instruction should have a consistent result type "
           "inside a function.",
           &LPI);

  Function *EnclosingFn = LPI.getParent()->getParent();
  Assert(EnclosingFn->hasPersonalityFn(),
         "LandingPadInst needs to be in a function with a personality.", &LPI);

  // The landingpad instruction must be the first non-PHI instruction in the
  // block.
  Assert(LPI.getParent()->getLandingPadInst() == &LPI,
         "LandingPadInst not the first non-PHI instruction in the block.",
         &LPI);

  // Every clause is either a catch (pointer typed) or a filter (constant
  // array / zero-initialized aggregate).
  for (unsigned Idx = 0, NumClauses = LPI.getNumClauses(); Idx < NumClauses;
       ++Idx) {
    Constant *Clause = LPI.getClause(Idx);
    if (LPI.isCatch(Idx)) {
      Assert(isa<PointerType>(Clause->getType()),
             "Catch operand does not have pointer type!", &LPI);
    } else {
      Assert(LPI.isFilter(Idx), "Clause is neither catch nor filter!", &LPI);
      Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
             "Filter operand is not an array of constants!", &LPI);
    }
  }

  visitInstruction(LPI);
}
|
|
|
|
|
2015-08-01 01:58:14 +08:00
|
|
|
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
  BasicBlock *ParentBB = CPI.getParent();

  // catchpad requires an EH personality on the enclosing function.
  Function *EnclosingFn = ParentBB->getParent();
  Assert(EnclosingFn->hasPersonalityFn(),
         "CatchPadInst needs to be in a function with a personality.", &CPI);

  // A catchpad's parent pad must always be a catchswitch.
  Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
         "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
         CPI.getParentPad());

  // The catchpad instruction must be the first non-PHI instruction in the
  // block.
  Assert(ParentBB->getFirstNonPHI() == &CPI,
         "CatchPadInst not the first non-PHI instruction in the block.", &CPI);

  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
|
|
|
|
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
  // A catchret may only return from a catchpad token.
  Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
         "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
         CatchReturn.getOperand(0));

  visitTerminatorInst(CatchReturn);
}
|
|
|
|
|
|
|
|
// Verify the structural invariants of a cleanuppad instruction: it must
// appear in a function that has an EH personality, it must be the first
// non-PHI instruction of its block, and its parent-pad operand must be
// either the 'none' token or an enclosing funclet pad. The shared EH-pad
// predecessor checks and funclet-pad checks are then delegated.
void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
  BasicBlock *Block = CPI.getParent();
  Function *Fn = Block->getParent();

  // A cleanuppad is only meaningful under some EH personality.
  Assert(Fn->hasPersonalityFn(),
         "CleanupPadInst needs to be in a function with a personality.", &CPI);

  // EH pads must open their block; only PHI nodes may precede them.
  Assert(Block->getFirstNonPHI() == &CPI,
         "CleanupPadInst not the first non-PHI instruction in the block.",
         &CPI);

  // The parent-pad token is either 'none' (top level) or a funclet pad.
  auto *ParentTok = CPI.getParentPad();
  Assert(isa<ConstantTokenNone>(ParentTok) || isa<FuncletPadInst>(ParentTok),
         "CleanupPadInst has an invalid parent.", &CPI);

  // Run the checks common to every EH pad, then those common to every
  // funclet pad (unwind-destination consistency, nesting, etc.).
  visitEHPadPredecessors(CPI);
  visitFuncletPadInst(CPI);
}
|
|
|
|
|
|
|
|
// Shared verification for funclet pads (cleanuppad/catchpad, per the
// isa<CleanupPadInst>(&FPI) dispatch below). The core invariant checked
// here is that every unwind edge leaving the pad FPI agrees on a single
// unwind destination. Because a cleanuppad's unwind destination can only
// be discovered by inspecting its users (cleanupret, catchswitch, invoke,
// nested pads), a worklist walk over nested cleanup pads is used; the
// Seen set also rejects a pad transitively nested within itself. Finally,
// if FPI's parent is a catchswitch, FPI's unwind destination must match
// the catchswitch's own.
void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
  // First unwind edge found to exit FPI, and the EH pad (or 'none' token,
  // for unwind-to-caller) it targets; later exiting edges must match.
  User *FirstUser = nullptr;
  Value *FirstUnwindPad = nullptr;
  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
  SmallSet<FuncletPadInst *, 8> Seen;

  while (!Worklist.empty()) {
    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
    Assert(Seen.insert(CurrentPad).second,
           "FuncletPadInst must not be nested within itself", CurrentPad);
    // Ancestor of CurrentPad (exclusive) up to which unwind destinations
    // have been resolved by the edge found in this iteration, if any.
    Value *UnresolvedAncestorPad = nullptr;
    for (User *U : CurrentPad->users()) {
      // Where this particular use sends the unwind; null means
      // unwind-to-caller for the instruction kinds below.
      BasicBlock *UnwindDest;
      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
        UnwindDest = CRI->getUnwindDest();
      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
        // We allow catchswitch unwind to caller to nest
        // within an outer pad that unwinds somewhere else,
        // because catchswitch doesn't have a nounwind variant.
        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
        if (CSI->unwindsToCaller())
          continue;
        UnwindDest = CSI->getUnwindDest();
      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
        UnwindDest = II->getUnwindDest();
      } else if (isa<CallInst>(U)) {
        // Calls which don't unwind may be found inside funclet
        // pads that unwind somewhere else. We don't *require*
        // such calls to be annotated nounwind.
        continue;
      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
        // The unwind dest for a cleanup can only be found by
        // recursive search. Add it to the worklist, and we'll
        // search for its first use that determines where it unwinds.
        Worklist.push_back(CPI);
        continue;
      } else {
        Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
        continue;
      }

      // UnwindPad is the EH pad (or 'none' token) this edge targets;
      // ExitsFPI records whether the edge leaves FPI itself (as opposed
      // to only leaving some nested pad).
      Value *UnwindPad;
      bool ExitsFPI;
      if (UnwindDest) {
        UnwindPad = UnwindDest->getFirstNonPHI();
        // A malformed unwind dest (not an EH pad) is diagnosed elsewhere;
        // skip it here rather than chase a bogus parent chain.
        if (!cast<Instruction>(UnwindPad)->isEHPad())
          continue;
        Value *UnwindParent = getParentPad(UnwindPad);
        // Ignore unwind edges that don't exit CurrentPad.
        if (UnwindParent == CurrentPad)
          continue;
        // Determine whether the original funclet pad is exited,
        // and if we are scanning nested pads determine how many
        // of them are exited so we can stop searching their
        // children.
        Value *ExitedPad = CurrentPad;
        ExitsFPI = false;
        do {
          if (ExitedPad == &FPI) {
            ExitsFPI = true;
            // Now we can resolve any ancestors of CurrentPad up to
            // FPI, but not including FPI since we need to make sure
            // to check all direct users of FPI for consistency.
            UnresolvedAncestorPad = &FPI;
            break;
          }
          Value *ExitedParent = getParentPad(ExitedPad);
          if (ExitedParent == UnwindParent) {
            // ExitedPad is the ancestor-most pad which this unwind
            // edge exits, so we can resolve up to it, meaning that
            // ExitedParent is the first ancestor still unresolved.
            UnresolvedAncestorPad = ExitedParent;
            break;
          }
          ExitedPad = ExitedParent;
        } while (!isa<ConstantTokenNone>(ExitedPad));
      } else {
        // Unwinding to caller exits all pads.
        UnwindPad = ConstantTokenNone::get(FPI.getContext());
        ExitsFPI = true;
        UnresolvedAncestorPad = &FPI;
      }

      if (ExitsFPI) {
        // This unwind edge exits FPI. Make sure it agrees with other
        // such edges.
        if (FirstUser) {
          Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
                                              "pad must have the same unwind "
                                              "dest",
                 &FPI, U, FirstUser);
        } else {
          FirstUser = U;
          FirstUnwindPad = UnwindPad;
          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
              getParentPad(UnwindPad) == getParentPad(&FPI))
            SiblingFuncletInfo[&FPI] = cast<TerminatorInst>(U);
        }
      }
      // Make sure we visit all uses of FPI, but for nested pads stop as
      // soon as we know where they unwind to.
      if (CurrentPad != &FPI)
        break;
    }
    if (UnresolvedAncestorPad) {
      if (CurrentPad == UnresolvedAncestorPad) {
        // When CurrentPad is FPI itself, we don't mark it as resolved even if
        // we've found an unwind edge that exits it, because we need to verify
        // all direct uses of FPI.
        assert(CurrentPad == &FPI);
        continue;
      }
      // Pop off the worklist any nested pads that we've found an unwind
      // destination for. The pads on the worklist are the uncles,
      // great-uncles, etc. of CurrentPad. We've found an unwind destination
      // for all ancestors of CurrentPad up to but not including
      // UnresolvedAncestorPad.
      Value *ResolvedPad = CurrentPad;
      while (!Worklist.empty()) {
        Value *UnclePad = Worklist.back();
        Value *AncestorPad = getParentPad(UnclePad);
        // Walk ResolvedPad up the ancestor list until we either find the
        // uncle's parent or the last resolved ancestor.
        while (ResolvedPad != AncestorPad) {
          Value *ResolvedParent = getParentPad(ResolvedPad);
          if (ResolvedParent == UnresolvedAncestorPad) {
            break;
          }
          ResolvedPad = ResolvedParent;
        }
        // If the resolved ancestor search didn't find the uncle's parent,
        // then the uncle is not yet resolved.
        if (ResolvedPad != AncestorPad)
          break;
        // This uncle is resolved, so pop it from the worklist.
        Worklist.pop_back();
      }
    }
  }

  // If any unwind edge exits FPI and FPI's parent is a catchswitch, the
  // destinations must agree (a missing catchswitch unwind dest is treated
  // as unwind-to-caller, i.e. the 'none' token).
  if (FirstUnwindPad) {
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
      Value *SwitchUnwindPad;
      if (SwitchUnwindDest)
        SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
      else
        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
      Assert(SwitchUnwindPad == FirstUnwindPad,
             "Unwind edges out of a catch must have the same unwind dest as "
             "the parent catchswitch",
             &FPI, FirstUser, CatchSwitch);
    }
  }

  visitInstruction(FPI);
}
|
|
|
|
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
|
|
|
|
BasicBlock *BB = CatchSwitch.getParent();
|
2015-09-03 17:09:43 +08:00
|
|
|
|
|
|
|
Function *F = BB->getParent();
|
|
|
|
Assert(F->hasPersonalityFn(),
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
"CatchSwitchInst needs to be in a function with a personality.",
|
|
|
|
&CatchSwitch);
|
2015-09-03 17:09:43 +08:00
|
|
|
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
// The catchswitch instruction must be the first non-PHI instruction in the
|
2015-09-03 17:09:43 +08:00
|
|
|
// block.
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
Assert(BB->getFirstNonPHI() == &CatchSwitch,
|
|
|
|
"CatchSwitchInst not the first non-PHI instruction in the block.",
|
|
|
|
&CatchSwitch);
|
2015-09-03 17:09:43 +08:00
|
|
|
|
2016-01-10 12:31:05 +08:00
|
|
|
auto *ParentPad = CatchSwitch.getParentPad();
|
|
|
|
Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
|
|
|
|
"CatchSwitchInst has an invalid parent.", ParentPad);
|
|
|
|
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
|
2015-09-03 17:09:43 +08:00
|
|
|
Instruction *I = UnwindDest->getFirstNonPHI();
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
Assert(I->isEHPad() && !isa<LandingPadInst>(I),
|
|
|
|
"CatchSwitchInst must unwind to an EH block which is not a "
|
|
|
|
"landingpad.",
|
|
|
|
&CatchSwitch);
|
2015-09-03 17:09:43 +08:00
|
|
|
|
2016-01-10 12:31:05 +08:00
|
|
|
// Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
|
|
|
|
if (getParentPad(I) == ParentPad)
|
|
|
|
SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
|
|
|
|
}
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
|
2016-01-02 23:25:25 +08:00
|
|
|
Assert(CatchSwitch.getNumHandlers() != 0,
|
|
|
|
"CatchSwitchInst cannot have empty handler list", &CatchSwitch);
|
|
|
|
|
2016-01-02 23:50:34 +08:00
|
|
|
for (BasicBlock *Handler : CatchSwitch.handlers()) {
|
2016-01-02 23:25:25 +08:00
|
|
|
Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
|
|
|
|
"CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
|
2016-01-02 23:50:34 +08:00
|
|
|
}
|
2016-01-02 23:25:25 +08:00
|
|
|
|
2016-03-01 06:56:36 +08:00
|
|
|
visitEHPadPredecessors(CatchSwitch);
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
visitTerminatorInst(CatchSwitch);
|
2015-09-03 17:09:43 +08:00
|
|
|
}
|
|
|
|
|
2015-08-01 01:58:14 +08:00
|
|
|
// Verify a cleanupret instruction: its pad operand must be a cleanuppad,
// and its unwind destination (when present) must begin with an EH pad
// that is not a landingpad. Delegates to the common terminator checks.
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  // Operand 0 is the funclet token being returned from; only a
  // cleanuppad may be named here.
  Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
         "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
         CRI.getOperand(0));

  // No unwind dest means unwind-to-caller, which needs no further checks.
  if (BasicBlock *Dest = CRI.getUnwindDest()) {
    Instruction *DestPad = Dest->getFirstNonPHI();
    Assert(DestPad->isEHPad() && !isa<LandingPadInst>(DestPad),
           "CleanupReturnInst must unwind to an EH block which is not a "
           "landingpad.",
           &CRI);
  }

  visitTerminatorInst(CRI);
}
|
|
|
|
|
2012-02-26 10:23:37 +08:00
|
|
|
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
|
|
|
|
Instruction *Op = cast<Instruction>(I.getOperand(i));
|
2012-08-18 02:21:28 +08:00
|
|
|
// If the we have an invalid invoke, don't try to compute the dominance.
|
|
|
|
// We already reject it in the invoke specific checks and the dominance
|
|
|
|
// computation doesn't handle multiple edges.
|
|
|
|
if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
|
|
|
|
if (II->getNormalDest() == II->getUnwindDest())
|
|
|
|
return;
|
|
|
|
}
|
2012-02-26 10:23:37 +08:00
|
|
|
|
[Verifier] Reject PHIs using defs from own block.
Reject the following IR as malformed (assuming that %entry, %next are
not in a loop):
next:
%y = phi i32 [ 0, %entry ]
%x = phi i32 [ %y, %entry ]
Such PHI nodes came up in PR26718. While there was no consensus on
whether or not this is valid IR, most opinions on that bug and in a
discussion on the llvm-dev mailing list tended towards a
"strict interpretation" (term by Joseph Tremoulet) of PHI node uses.
Also, the language reference explicitly states that "the use of each
incoming value is deemed to occur on the edge from the corresponding
predecessor block to the current block" and
`DominatorTree::dominates(Instruction*, Use&)` uses this definition as
well.
For the code mentioned in PR15384, clang does not compile to such PHIs
(anymore?). The test case still hangs when replacing `%tmp6` with `%tmp`
in revisions before r176366 (where PR15384 has been fixed). The
occurrence of %tmp6 therefore was probably unintentional. Its value is
not used except in other PHIs.
Reviewers: majnemer, reames, JosephTremoulet, bkramer, grosser, jdoerfert, kparzysz, sanjoy
Differential Revision: http://reviews.llvm.org/D18443
llvm-svn: 264528
2016-03-27 07:32:57 +08:00
|
|
|
// Quick check whether the def has already been encountered in the same block.
|
|
|
|
// PHI nodes are not checked to prevent accepting preceeding PHIs, because PHI
|
|
|
|
// uses are defined to happen on the incoming edge, not at the instruction.
|
Reapply "ValueMapper: Treat LocalAsMetadata more like function-local Values"
This reverts commit r265765, reapplying r265759 after changing a call from
LocalAsMetadata::get to ValueAsMetadata::get (and adding a unit test). When a
local value is mapped to a constant (like "i32 %a" => "i32 7"), the new debug
intrinsic operand may no longer be pointing at a local.
http://lab.llvm.org:8080/green/job/clang-stage1-configure-RA_build/19020/
The previous coommit message follows:
--
This is a partial re-commit -- maybe more of a re-implementation -- of
r265631 (reverted in r265637).
This makes RF_IgnoreMissingLocals behave (almost) consistently between
the Value and the Metadata hierarchy. In particular:
- MapValue returns nullptr or "metadata !{}" for missing locals in
MetadataAsValue/LocalAsMetadata bridging paris, depending on
the RF_IgnoreMissingLocals flag.
- MapValue doesn't memoize LocalAsMetadata-related results.
- MapMetadata no longer deals with LocalAsMetadata or
RF_IgnoreMissingLocals at all. (This wasn't in r265631 at all, but
I realized during testing it would make the patch simpler with no
loss of generality.)
r265631 went too far, making both functions universally ignore
RF_IgnoreMissingLocals. This broke building (e.g.) compiler-rt.
Reassociate (and possibly other passes) don't currently maintain
dominates-use invariants for metadata operands, resulting in IR like
this:
define void @foo(i32 %arg) {
call void @llvm.some.intrinsic(metadata i32 %x)
%x = add i32 1, i32 %arg
}
If the inliner chooses to inline @foo into another function, then
RemapInstruction will call `MapValue(metadata i32 %x)` and assert that
the return is not nullptr.
I've filed PR27273 to add a Verifier check and fix the underlying
problem in the optimization passes.
As a workaround, return `!{}` instead of nullptr for unmapped
LocalAsMetadata when RF_IgnoreMissingLocals is unset. Otherwise, match
the behaviour of r265631.
Original commit message:
ValueMapper: Make LocalAsMetadata match function-local Values
Start treating LocalAsMetadata similarly to function-local members of
the Value hierarchy in MapValue and MapMetadata.
- Don't memoize them.
- Return nullptr if they are missing.
This also cleans up ConstantAsMetadata to stop listening to the
RF_IgnoreMissingLocals flag.
llvm-svn: 265768
2016-04-08 11:13:22 +08:00
|
|
|
//
|
|
|
|
// FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
|
|
|
|
// wrapping an SSA value, assert that we've already encountered it. See
|
|
|
|
// related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
|
[Verifier] Reject PHIs using defs from own block.
Reject the following IR as malformed (assuming that %entry, %next are
not in a loop):
next:
%y = phi i32 [ 0, %entry ]
%x = phi i32 [ %y, %entry ]
Such PHI nodes came up in PR26718. While there was no consensus on
whether or not this is valid IR, most opinions on that bug and in a
discussion on the llvm-dev mailing list tended towards a
"strict interpretation" (term by Joseph Tremoulet) of PHI node uses.
Also, the language reference explicitly states that "the use of each
incoming value is deemed to occur on the edge from the corresponding
predecessor block to the current block" and
`DominatorTree::dominates(Instruction*, Use&)` uses this definition as
well.
For the code mentioned in PR15384, clang does not compile to such PHIs
(anymore?). The test case still hangs when replacing `%tmp6` with `%tmp`
in revisions before r176366 (where PR15384 has been fixed). The
occurrence of %tmp6 therefore was probably unintentional. Its value is
not used except in other PHIs.
Reviewers: majnemer, reames, JosephTremoulet, bkramer, grosser, jdoerfert, kparzysz, sanjoy
Differential Revision: http://reviews.llvm.org/D18443
llvm-svn: 264528
2016-03-27 07:32:57 +08:00
|
|
|
if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
|
|
|
|
return;
|
|
|
|
|
2012-06-02 05:56:26 +08:00
|
|
|
const Use &U = I.getOperandUse(i);
|
[Verifier] Reject PHIs using defs from own block.
Reject the following IR as malformed (assuming that %entry, %next are
not in a loop):
next:
%y = phi i32 [ 0, %entry ]
%x = phi i32 [ %y, %entry ]
Such PHI nodes came up in PR26718. While there was no consensus on
whether or not this is valid IR, most opinions on that bug and in a
discussion on the llvm-dev mailing list tended towards a
"strict interpretation" (term by Joseph Tremoulet) of PHI node uses.
Also, the language reference explicitly states that "the use of each
incoming value is deemed to occur on the edge from the corresponding
predecessor block to the current block" and
`DominatorTree::dominates(Instruction*, Use&)` uses this definition as
well.
For the code mentioned in PR15384, clang does not compile to such PHIs
(anymore?). The test case still hangs when replacing `%tmp6` with `%tmp`
in revisions before r176366 (where PR15384 has been fixed). The
occurrence of %tmp6 therefore was probably unintentional. Its value is
not used except in other PHIs.
Reviewers: majnemer, reames, JosephTremoulet, bkramer, grosser, jdoerfert, kparzysz, sanjoy
Differential Revision: http://reviews.llvm.org/D18443
llvm-svn: 264528
2016-03-27 07:32:57 +08:00
|
|
|
Assert(DT.dominates(Op, U),
|
2015-03-08 05:15:40 +08:00
|
|
|
"Instruction does not dominate all uses!", Op, &I);
|
2012-02-26 10:23:37 +08:00
|
|
|
}
|
|
|
|
|
2015-10-10 01:41:29 +08:00
|
|
|
/// Verify well-formedness of a !dereferenceable or !dereferenceable_or_null
/// metadata attachment \p MD on instruction \p I: it may only appear on loads
/// of pointer type and must carry a single i64 operand.
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
  Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
                                     "apply only to pointer types", &I);
  Assert(isa<LoadInst>(I),
         "dereferenceable, dereferenceable_or_null apply only to load"
         " instructions, use attributes for calls or invokes", &I);
  Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
                                    "take one operand!", &I);
  // The sole operand encodes the number of dereferenceable bytes as an i64.
  ConstantInt *Bytes = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
  Assert(Bytes && Bytes->getType()->isIntegerTy(64), "dereferenceable, "
         "dereferenceable_or_null metadata value must be an i64!", &I);
}
|
|
|
|
|
2004-03-02 08:22:19 +08:00
|
|
|
/// verifyInstruction - Verify that an instruction is well formed.
|
|
|
|
///
|
2002-06-25 23:56:27 +08:00
|
|
|
void Verifier::visitInstruction(Instruction &I) {
|
2005-04-22 07:48:37 +08:00
|
|
|
BasicBlock *BB = I.getParent();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(BB, "Instruction not embedded in basic block!", &I);
|
2002-04-19 04:37:37 +08:00
|
|
|
|
|
|
|
if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
|
2014-03-09 12:57:09 +08:00
|
|
|
for (User *U : I.users()) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
|
|
|
|
"Only PHI nodes may reference their own value!", &I);
|
2014-03-09 12:57:09 +08:00
|
|
|
}
|
2002-04-19 04:37:37 +08:00
|
|
|
}
|
2009-09-08 04:44:51 +08:00
|
|
|
|
2002-07-18 08:13:42 +08:00
|
|
|
// Check that void typed values don't have names
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(!I.getType()->isVoidTy() || !I.hasName(),
|
|
|
|
"Instruction has a name, but provides a void value!", &I);
|
2002-07-18 08:13:42 +08:00
|
|
|
|
2004-03-29 08:29:36 +08:00
|
|
|
// Check that the return value of the instruction is either void or a legal
|
|
|
|
// value type.
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
|
|
|
|
"Instruction returns a non-scalar type!", &I);
|
2004-03-29 08:29:36 +08:00
|
|
|
|
2009-09-28 07:27:42 +08:00
|
|
|
// Check that the instruction doesn't produce metadata. Calls are already
|
|
|
|
// checked against the callee type.
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
|
|
|
|
"Invalid use of metadata!", &I);
|
2009-05-30 13:06:04 +08:00
|
|
|
|
2003-10-06 01:44:18 +08:00
|
|
|
// Check that all uses of the instruction, if they are instructions
|
|
|
|
// themselves, actually have parent basic blocks. If the use is not an
|
|
|
|
// instruction, it is an error!
|
2014-03-09 11:16:01 +08:00
|
|
|
for (Use &U : I.uses()) {
|
|
|
|
if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(Used->getParent() != nullptr,
|
|
|
|
"Instruction referencing"
|
|
|
|
" instruction not embedded in a basic block!",
|
|
|
|
&I, Used);
|
2009-09-08 10:02:39 +08:00
|
|
|
else {
|
2014-03-09 11:16:01 +08:00
|
|
|
CheckFailed("Use of instruction is not an instruction!", U);
|
2009-09-08 10:02:39 +08:00
|
|
|
return;
|
|
|
|
}
|
2002-07-18 08:13:42 +08:00
|
|
|
}
|
2003-05-08 11:47:33 +08:00
|
|
|
|
2003-10-06 01:44:18 +08:00
|
|
|
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
|
2006-07-12 04:29:49 +08:00
|
|
|
|
|
|
|
// Check to make sure that only first-class-values are operands to
|
|
|
|
// instructions.
|
2008-02-21 09:54:02 +08:00
|
|
|
if (!I.getOperand(i)->getType()->isFirstClassType()) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(0, "Instruction operands must be first-class values!", &I);
|
2008-02-21 09:54:02 +08:00
|
|
|
}
|
2009-05-30 13:06:04 +08:00
|
|
|
|
2004-03-14 11:23:54 +08:00
|
|
|
if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
|
2006-07-12 04:29:49 +08:00
|
|
|
// Check to make sure that the "address of" an intrinsic function is never
|
|
|
|
// taken.
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(
|
|
|
|
!F->isIntrinsic() ||
|
|
|
|
i == (isa<CallInst>(I) ? e - 1 : isa<InvokeInst>(I) ? e - 3 : 0),
|
|
|
|
"Cannot take the address of an intrinsic!", &I);
|
|
|
|
Assert(
|
|
|
|
!F->isIntrinsic() || isa<CallInst>(I) ||
|
2014-10-18 01:39:00 +08:00
|
|
|
F->getIntrinsicID() == Intrinsic::donothing ||
|
|
|
|
F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
|
2015-02-19 19:28:47 +08:00
|
|
|
F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
|
|
|
|
F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint,
|
2016-03-01 06:04:25 +08:00
|
|
|
"Cannot invoke an intrinsic other than donothing, patchpoint or "
|
|
|
|
"statepoint",
|
2015-03-08 05:15:40 +08:00
|
|
|
&I);
|
|
|
|
Assert(F->getParent() == M, "Referencing function in another module!",
|
2015-12-02 03:06:36 +08:00
|
|
|
&I, M, F, F->getParent());
|
2004-03-14 11:23:54 +08:00
|
|
|
} else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(OpBB->getParent() == BB->getParent(),
|
|
|
|
"Referring to a basic block in another function!", &I);
|
2004-03-14 11:23:54 +08:00
|
|
|
} else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(OpArg->getParent() == BB->getParent(),
|
|
|
|
"Referring to an argument in another function!", &I);
|
2007-04-21 05:48:08 +08:00
|
|
|
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
|
2015-12-02 03:06:36 +08:00
|
|
|
Assert(GV->getParent() == M, "Referencing global in another module!", &I, M, GV, GV->getParent());
|
2012-02-26 10:23:37 +08:00
|
|
|
} else if (isa<Instruction>(I.getOperand(i))) {
|
|
|
|
verifyDominatesUse(I, i);
|
2006-01-26 08:08:45 +08:00
|
|
|
} else if (isa<InlineAsm>(I.getOperand(i))) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert((i + 1 == e && isa<CallInst>(I)) ||
|
|
|
|
(i + 3 == e && isa<InvokeInst>(I)),
|
|
|
|
"Cannot take the address of an inline asm!", &I);
|
2013-08-01 01:49:08 +08:00
|
|
|
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
|
|
|
|
if (CE->getType()->isPtrOrPtrVectorTy()) {
|
|
|
|
// If we have a ConstantExpr pointer, we need to see if it came from an
|
|
|
|
// illegal bitcast (inttoptr <constant int> )
|
2015-12-11 01:56:06 +08:00
|
|
|
visitConstantExprsRecursively(CE);
|
2013-08-01 01:49:08 +08:00
|
|
|
}
|
2003-10-06 01:44:18 +08:00
|
|
|
}
|
|
|
|
}
|
2012-03-24 08:14:51 +08:00
|
|
|
|
2014-11-12 05:30:22 +08:00
|
|
|
if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(I.getType()->isFPOrFPVectorTy(),
|
|
|
|
"fpmath requires a floating point result!", &I);
|
|
|
|
Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
if (ConstantFP *CFP0 =
|
|
|
|
mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
|
2012-04-17 00:28:59 +08:00
|
|
|
APFloat Accuracy = CFP0->getValueAPF();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
|
|
|
|
"fpmath accuracy not a positive number!", &I);
|
2012-04-17 00:28:59 +08:00
|
|
|
} else {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(false, "invalid fpmath accuracy!", &I);
|
2012-04-17 00:28:59 +08:00
|
|
|
}
|
2012-04-10 16:22:43 +08:00
|
|
|
}
|
|
|
|
|
2014-11-12 05:30:22 +08:00
|
|
|
if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
|
|
|
|
"Ranges are only for loads, calls and invokes!", &I);
|
2014-10-21 07:52:07 +08:00
|
|
|
visitRangeMetadata(I, Range, I.getType());
|
|
|
|
}
|
2012-03-24 08:14:51 +08:00
|
|
|
|
2014-10-22 04:56:29 +08:00
|
|
|
if (I.getMetadata(LLVMContext::MD_nonnull)) {
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
|
|
|
|
&I);
|
|
|
|
Assert(isa<LoadInst>(I),
|
|
|
|
"nonnull applies only to load instructions, use attributes"
|
|
|
|
" for calls or invokes",
|
|
|
|
&I);
|
2014-10-22 04:56:29 +08:00
|
|
|
}
|
|
|
|
|
2015-10-10 01:41:29 +08:00
|
|
|
if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
|
|
|
|
visitDereferenceableMetadata(I, MD);
|
|
|
|
|
|
|
|
if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
|
|
|
|
visitDereferenceableMetadata(I, MD);
|
|
|
|
|
|
|
|
if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
|
|
|
|
Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
|
|
|
|
&I);
|
|
|
|
Assert(isa<LoadInst>(I), "align applies only to load instructions, "
|
|
|
|
"use attributes for calls or invokes", &I);
|
|
|
|
Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
|
|
|
|
ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
|
|
|
|
Assert(CI && CI->getType()->isIntegerTy(64),
|
|
|
|
"align metadata value must be an i64!", &I);
|
|
|
|
uint64_t Align = CI->getZExtValue();
|
|
|
|
Assert(isPowerOf2_64(Align),
|
|
|
|
"align metadata value must be a power of 2!", &I);
|
|
|
|
Assert(Align <= Value::MaximumAlignment,
|
|
|
|
"alignment is larger that implementation defined limit", &I);
|
|
|
|
}
|
|
|
|
|
2015-03-21 03:26:58 +08:00
|
|
|
if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
|
2015-03-25 01:32:19 +08:00
|
|
|
visitMDNode(*N);
|
2015-03-21 03:26:58 +08:00
|
|
|
}
|
|
|
|
|
2016-04-24 05:08:00 +08:00
|
|
|
if (auto *DII = dyn_cast<DbgInfoIntrinsic>(&I))
|
|
|
|
verifyBitPieceExpression(*DII);
|
|
|
|
|
2004-09-30 04:07:45 +08:00
|
|
|
InstsInThisBlock.insert(&I);
|
2003-05-08 11:47:33 +08:00
|
|
|
}
|
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
/// Verify that the specified type (which comes from an intrinsic argument or
|
|
|
|
/// return value) matches the type constraints specified by the .td file (e.g.
|
|
|
|
/// an "any integer" argument really is an integer).
|
2012-05-28 03:37:05 +08:00
|
|
|
///
|
2016-02-01 00:32:23 +08:00
|
|
|
/// This returns true on error but does not print a message.
|
|
|
|
bool Verifier::verifyIntrinsicType(Type *Ty,
|
2012-05-28 03:37:05 +08:00
|
|
|
ArrayRef<Intrinsic::IITDescriptor> &Infos,
|
|
|
|
SmallVectorImpl<Type*> &ArgTys) {
|
|
|
|
using namespace Intrinsic;
|
|
|
|
|
|
|
|
// If we ran out of descriptors, there are too many arguments.
|
2013-07-21 01:46:00 +08:00
|
|
|
if (Infos.empty()) return true;
|
2012-05-28 03:37:05 +08:00
|
|
|
IITDescriptor D = Infos.front();
|
|
|
|
Infos = Infos.slice(1);
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2012-05-28 03:37:05 +08:00
|
|
|
switch (D.Kind) {
|
|
|
|
case IITDescriptor::Void: return !Ty->isVoidTy();
|
2013-11-01 01:18:11 +08:00
|
|
|
case IITDescriptor::VarArg: return true;
|
2012-05-28 03:37:05 +08:00
|
|
|
case IITDescriptor::MMX: return !Ty->isX86_MMXTy();
|
2015-09-02 21:36:25 +08:00
|
|
|
case IITDescriptor::Token: return !Ty->isTokenTy();
|
2012-05-28 03:37:05 +08:00
|
|
|
case IITDescriptor::Metadata: return !Ty->isMetadataTy();
|
2013-01-11 09:45:05 +08:00
|
|
|
case IITDescriptor::Half: return !Ty->isHalfTy();
|
2012-05-28 03:37:05 +08:00
|
|
|
case IITDescriptor::Float: return !Ty->isFloatTy();
|
|
|
|
case IITDescriptor::Double: return !Ty->isDoubleTy();
|
|
|
|
case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width);
|
|
|
|
case IITDescriptor::Vector: {
|
|
|
|
VectorType *VT = dyn_cast<VectorType>(Ty);
|
2014-04-09 14:08:46 +08:00
|
|
|
return !VT || VT->getNumElements() != D.Vector_Width ||
|
2016-02-01 00:32:23 +08:00
|
|
|
verifyIntrinsicType(VT->getElementType(), Infos, ArgTys);
|
2012-05-28 03:37:05 +08:00
|
|
|
}
|
|
|
|
case IITDescriptor::Pointer: {
|
|
|
|
PointerType *PT = dyn_cast<PointerType>(Ty);
|
2014-04-09 14:08:46 +08:00
|
|
|
return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace ||
|
2016-02-01 00:32:23 +08:00
|
|
|
verifyIntrinsicType(PT->getElementType(), Infos, ArgTys);
|
2012-05-28 03:37:05 +08:00
|
|
|
}
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2012-05-28 03:37:05 +08:00
|
|
|
case IITDescriptor::Struct: {
|
|
|
|
StructType *ST = dyn_cast<StructType>(Ty);
|
2014-04-09 14:08:46 +08:00
|
|
|
if (!ST || ST->getNumElements() != D.Struct_NumElements)
|
2012-05-28 03:37:05 +08:00
|
|
|
return true;
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2012-05-28 03:37:05 +08:00
|
|
|
for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
|
2016-02-01 00:32:23 +08:00
|
|
|
if (verifyIntrinsicType(ST->getElementType(i), Infos, ArgTys))
|
2012-05-28 03:37:05 +08:00
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2012-05-28 03:37:05 +08:00
|
|
|
case IITDescriptor::Argument:
|
2012-06-02 18:20:22 +08:00
|
|
|
// Two cases here - If this is the second occurrence of an argument, verify
|
2013-07-21 01:46:00 +08:00
|
|
|
// that the later instance matches the previous instance.
|
2012-05-28 03:37:05 +08:00
|
|
|
if (D.getArgumentNumber() < ArgTys.size())
|
2013-07-21 01:46:00 +08:00
|
|
|
return Ty != ArgTys[D.getArgumentNumber()];
|
|
|
|
|
2012-05-28 03:37:05 +08:00
|
|
|
// Otherwise, if this is the first instance of an argument, record it and
|
|
|
|
// verify the "Any" kind.
|
|
|
|
assert(D.getArgumentNumber() == ArgTys.size() && "Table consistency error");
|
|
|
|
ArgTys.push_back(Ty);
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2012-05-28 03:37:05 +08:00
|
|
|
switch (D.getArgumentKind()) {
|
2015-01-23 04:14:38 +08:00
|
|
|
case IITDescriptor::AK_Any: return false; // Success
|
2012-05-28 03:37:05 +08:00
|
|
|
case IITDescriptor::AK_AnyInteger: return !Ty->isIntOrIntVectorTy();
|
|
|
|
case IITDescriptor::AK_AnyFloat: return !Ty->isFPOrFPVectorTy();
|
|
|
|
case IITDescriptor::AK_AnyVector: return !isa<VectorType>(Ty);
|
|
|
|
case IITDescriptor::AK_AnyPointer: return !isa<PointerType>(Ty);
|
|
|
|
}
|
|
|
|
llvm_unreachable("all argument kinds not covered");
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2014-03-28 20:31:39 +08:00
|
|
|
case IITDescriptor::ExtendArgument: {
|
2012-05-28 03:37:05 +08:00
|
|
|
// This may only be used when referring to a previous vector argument.
|
2014-03-28 20:31:39 +08:00
|
|
|
if (D.getArgumentNumber() >= ArgTys.size())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
Type *NewTy = ArgTys[D.getArgumentNumber()];
|
|
|
|
if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
|
|
|
|
NewTy = VectorType::getExtendedElementVectorType(VTy);
|
|
|
|
else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
|
|
|
|
NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth());
|
|
|
|
else
|
|
|
|
return true;
|
2012-05-28 03:37:05 +08:00
|
|
|
|
2014-03-28 20:31:39 +08:00
|
|
|
return Ty != NewTy;
|
|
|
|
}
|
|
|
|
case IITDescriptor::TruncArgument: {
|
2012-05-28 03:37:05 +08:00
|
|
|
// This may only be used when referring to a previous vector argument.
|
2014-03-28 20:31:39 +08:00
|
|
|
if (D.getArgumentNumber() >= ArgTys.size())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
Type *NewTy = ArgTys[D.getArgumentNumber()];
|
|
|
|
if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
|
|
|
|
NewTy = VectorType::getTruncatedElementVectorType(VTy);
|
|
|
|
else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
|
|
|
|
NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2);
|
|
|
|
else
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return Ty != NewTy;
|
|
|
|
}
|
2014-03-29 15:04:54 +08:00
|
|
|
case IITDescriptor::HalfVecArgument:
|
|
|
|
// This may only be used when referring to a previous vector argument.
|
|
|
|
return D.getArgumentNumber() >= ArgTys.size() ||
|
|
|
|
!isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
|
|
|
|
VectorType::getHalfElementsVectorType(
|
|
|
|
cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
|
2014-12-04 17:40:44 +08:00
|
|
|
case IITDescriptor::SameVecWidthArgument: {
|
|
|
|
if (D.getArgumentNumber() >= ArgTys.size())
|
|
|
|
return true;
|
|
|
|
VectorType * ReferenceType =
|
|
|
|
dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
|
|
|
|
VectorType *ThisArgType = dyn_cast<VectorType>(Ty);
|
|
|
|
if (!ThisArgType || !ReferenceType ||
|
|
|
|
(ReferenceType->getVectorNumElements() !=
|
|
|
|
ThisArgType->getVectorNumElements()))
|
|
|
|
return true;
|
2016-02-01 00:32:23 +08:00
|
|
|
return verifyIntrinsicType(ThisArgType->getVectorElementType(),
|
2014-12-04 17:40:44 +08:00
|
|
|
Infos, ArgTys);
|
|
|
|
}
|
2014-12-25 15:49:20 +08:00
|
|
|
case IITDescriptor::PtrToArgument: {
|
|
|
|
if (D.getArgumentNumber() >= ArgTys.size())
|
|
|
|
return true;
|
|
|
|
Type * ReferenceType = ArgTys[D.getArgumentNumber()];
|
|
|
|
PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
|
|
|
|
return (!ThisArgType || ThisArgType->getElementType() != ReferenceType);
|
|
|
|
}
|
Masked Gather and Scatter Intrinsics.
Gather and Scatter are new introduced intrinsics, comming after recently implemented masked load and store.
This is the first patch for Gather and Scatter intrinsics. It includes only the syntax, parsing and verification.
Gather and Scatter intrinsics allow to perform multiple memory accesses (read/write) in one vector instruction.
The intrinsics are not target specific and will have the following syntax:
Gather:
declare <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> <vector of ptrs>, i32 <alignment>, <16 x i1> <mask>, <16 x i32> <passthru>)
declare <8 x float> @llvm.masked.gather.v8f32(<8 x float*><vector of ptrs>, i32 <alignment>, <8 x i1> <mask>, <8 x float><passthru>)
Scatter:
declare void @llvm.masked.scatter.v8i32(<8 x i32><vector value to be stored> , <8 x i32*><vector of ptrs> , i32 <alignment>, <8 x i1> <mask>)
declare void @llvm.masked.scatter.v16i32(<16 x i32> <vector value to be stored> , <16 x i32*> <vector of ptrs>, i32 <alignment>, <16 x i1><mask> )
Vector of ptrs - a set of source/destination addresses, to load/store the value.
Mask - switches on/off vector lanes to prevent memory access for switched-off lanes
vector of ptrs, value and mask should have the same vector width.
These are code examples where gather / scatter should be used and will allow function vectorization
;void foo1(int * restrict A, int * restrict B, int * restrict C) {
; for (int i=0; i<SIZE; i++) {
; A[i] = B[C[i]];
; }
;}
;void foo3(int * restrict A, int * restrict B) {
; for (int i=0; i<SIZE; i++) {
; A[B[i]] = i+5;
; }
;}
Tests will come in the following patches, with CodeGen and Vectorizer.
http://reviews.llvm.org/D7433
llvm-svn: 228521
2015-02-08 16:27:19 +08:00
|
|
|
case IITDescriptor::VecOfPtrsToElt: {
|
|
|
|
if (D.getArgumentNumber() >= ArgTys.size())
|
|
|
|
return true;
|
|
|
|
VectorType * ReferenceType =
|
|
|
|
dyn_cast<VectorType> (ArgTys[D.getArgumentNumber()]);
|
|
|
|
VectorType *ThisArgVecTy = dyn_cast<VectorType>(Ty);
|
|
|
|
if (!ThisArgVecTy || !ReferenceType ||
|
|
|
|
(ReferenceType->getVectorNumElements() !=
|
|
|
|
ThisArgVecTy->getVectorNumElements()))
|
|
|
|
return true;
|
|
|
|
PointerType *ThisArgEltTy =
|
|
|
|
dyn_cast<PointerType>(ThisArgVecTy->getVectorElementType());
|
|
|
|
if (!ThisArgEltTy)
|
|
|
|
return true;
|
2015-05-12 07:09:25 +08:00
|
|
|
return ThisArgEltTy->getElementType() !=
|
|
|
|
ReferenceType->getVectorElementType();
|
Masked Gather and Scatter Intrinsics.
Gather and Scatter are new introduced intrinsics, comming after recently implemented masked load and store.
This is the first patch for Gather and Scatter intrinsics. It includes only the syntax, parsing and verification.
Gather and Scatter intrinsics allow to perform multiple memory accesses (read/write) in one vector instruction.
The intrinsics are not target specific and will have the following syntax:
Gather:
declare <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> <vector of ptrs>, i32 <alignment>, <16 x i1> <mask>, <16 x i32> <passthru>)
declare <8 x float> @llvm.masked.gather.v8f32(<8 x float*><vector of ptrs>, i32 <alignment>, <8 x i1> <mask>, <8 x float><passthru>)
Scatter:
declare void @llvm.masked.scatter.v8i32(<8 x i32><vector value to be stored> , <8 x i32*><vector of ptrs> , i32 <alignment>, <8 x i1> <mask>)
declare void @llvm.masked.scatter.v16i32(<16 x i32> <vector value to be stored> , <16 x i32*> <vector of ptrs>, i32 <alignment>, <16 x i1><mask> )
Vector of ptrs - a set of source/destination addresses, to load/store the value.
Mask - switches on/off vector lanes to prevent memory access for switched-off lanes
vector of ptrs, value and mask should have the same vector width.
These are code examples where gather / scatter should be used and will allow function vectorization
;void foo1(int * restrict A, int * restrict B, int * restrict C) {
; for (int i=0; i<SIZE; i++) {
; A[i] = B[C[i]];
; }
;}
;void foo3(int * restrict A, int * restrict B) {
; for (int i=0; i<SIZE; i++) {
; A[B[i]] = i+5;
; }
;}
Tests will come in the following patches, with CodeGen and Vectorizer.
http://reviews.llvm.org/D7433
llvm-svn: 228521
2015-02-08 16:27:19 +08:00
|
|
|
}
|
2012-05-28 03:37:05 +08:00
|
|
|
}
|
|
|
|
llvm_unreachable("unhandled");
|
|
|
|
}
|
2009-01-07 08:09:01 +08:00
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
/// Verify if the intrinsic has variable arguments. This method is intended to
|
|
|
|
/// be called after all the fixed arguments have been verified first.
|
2013-11-01 01:18:11 +08:00
|
|
|
///
|
|
|
|
/// This method returns true on error and does not print an error message.
|
|
|
|
bool
|
2016-02-01 00:32:23 +08:00
|
|
|
Verifier::verifyIntrinsicIsVarArg(bool isVarArg,
|
2013-11-01 01:18:11 +08:00
|
|
|
ArrayRef<Intrinsic::IITDescriptor> &Infos) {
|
|
|
|
using namespace Intrinsic;
|
|
|
|
|
|
|
|
// If there are no descriptors left, then it can't be a vararg.
|
|
|
|
if (Infos.empty())
|
2015-03-09 09:57:13 +08:00
|
|
|
return isVarArg;
|
2013-11-01 01:18:11 +08:00
|
|
|
|
|
|
|
// There should be only one descriptor remaining at this point.
|
|
|
|
if (Infos.size() != 1)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Check and verify the descriptor.
|
|
|
|
IITDescriptor D = Infos.front();
|
|
|
|
Infos = Infos.slice(1);
|
|
|
|
if (D.Kind == IITDescriptor::VarArg)
|
2015-03-09 09:57:13 +08:00
|
|
|
return !isVarArg;
|
2013-11-01 01:18:11 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-06-27 06:21:52 +08:00
|
|
|
/// Allow intrinsics to be verified in different ways.
|
|
|
|
void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
|
2015-06-27 06:04:34 +08:00
|
|
|
Function *IF = CS.getCalledFunction();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
|
|
|
|
IF);
|
2009-09-08 04:44:51 +08:00
|
|
|
|
2012-05-28 03:37:05 +08:00
|
|
|
// Verify that the intrinsic prototype lines up with what the .td files
|
|
|
|
// describe.
|
|
|
|
FunctionType *IFTy = IF->getFunctionType();
|
2013-11-01 01:18:11 +08:00
|
|
|
bool IsVarArg = IFTy->isVarArg();
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2012-05-28 03:37:05 +08:00
|
|
|
SmallVector<Intrinsic::IITDescriptor, 8> Table;
|
|
|
|
getIntrinsicInfoTableEntries(ID, Table);
|
|
|
|
ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
|
|
|
|
|
|
|
|
SmallVector<Type *, 4> ArgTys;
|
2016-02-01 00:32:23 +08:00
|
|
|
Assert(!verifyIntrinsicType(IFTy->getReturnType(), TableRef, ArgTys),
|
2015-03-08 05:15:40 +08:00
|
|
|
"Intrinsic has incorrect return type!", IF);
|
2012-05-28 03:37:05 +08:00
|
|
|
for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i)
|
2016-02-01 00:32:23 +08:00
|
|
|
Assert(!verifyIntrinsicType(IFTy->getParamType(i), TableRef, ArgTys),
|
2015-03-08 05:15:40 +08:00
|
|
|
"Intrinsic has incorrect argument type!", IF);
|
2013-11-01 01:18:11 +08:00
|
|
|
|
|
|
|
// Verify if the intrinsic call matches the vararg property.
|
|
|
|
if (IsVarArg)
|
2016-02-01 00:32:23 +08:00
|
|
|
Assert(!verifyIntrinsicIsVarArg(IsVarArg, TableRef),
|
2015-03-08 05:15:40 +08:00
|
|
|
"Intrinsic was not defined with variable arguments!", IF);
|
2013-11-01 01:18:11 +08:00
|
|
|
else
|
2016-02-01 00:32:23 +08:00
|
|
|
Assert(!verifyIntrinsicIsVarArg(IsVarArg, TableRef),
|
2015-03-08 05:15:40 +08:00
|
|
|
"Callsite was not defined with variable arguments!", IF);
|
2013-11-01 01:18:11 +08:00
|
|
|
|
|
|
|
// All descriptors should be absorbed by now.
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
|
2012-05-28 03:37:05 +08:00
|
|
|
|
|
|
|
// Now that we have the intrinsic ID and the actual argument types (and we
|
|
|
|
// know they are legal for the intrinsic!) get the intrinsic name through the
|
|
|
|
// usual means. This allows us to verify the mangling of argument types into
|
|
|
|
// the name.
|
2014-03-11 05:22:44 +08:00
|
|
|
const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(ExpectedName == IF->getName(),
|
|
|
|
"Intrinsic name not mangled correctly for type arguments! "
|
|
|
|
"Should be: " +
|
|
|
|
ExpectedName,
|
|
|
|
IF);
|
2013-07-21 01:46:00 +08:00
|
|
|
|
2009-12-28 17:07:21 +08:00
|
|
|
// If the intrinsic takes MDNode arguments, verify that they are either global
|
|
|
|
// or are local to *this* function.
|
2015-06-27 06:21:52 +08:00
|
|
|
for (Value *V : CS.args())
|
|
|
|
if (auto *MD = dyn_cast<MetadataAsValue>(V))
|
|
|
|
visitMetadataAsValue(*MD, CS.getCaller());
|
2009-12-19 04:09:14 +08:00
|
|
|
|
2007-09-18 04:30:04 +08:00
|
|
|
switch (ID) {
|
|
|
|
default:
|
|
|
|
break;
|
2011-12-12 12:36:02 +08:00
|
|
|
case Intrinsic::ctlz: // llvm.ctlz
|
|
|
|
case Intrinsic::cttz: // llvm.cttz
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(isa<ConstantInt>(CS.getArgOperand(1)),
|
2015-03-08 05:15:40 +08:00
|
|
|
"is_zero_undef argument of bit counting intrinsics must be a "
|
|
|
|
"constant int",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2011-12-12 12:36:02 +08:00
|
|
|
break;
|
2015-03-15 08:50:57 +08:00
|
|
|
case Intrinsic::dbg_declare: // llvm.dbg.declare
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
|
|
|
|
"invalid llvm.dbg.declare intrinsic call 1", CS);
|
|
|
|
visitDbgIntrinsic("declare", cast<DbgDeclareInst>(*CS.getInstruction()));
|
2015-03-15 09:21:30 +08:00
|
|
|
break;
|
|
|
|
case Intrinsic::dbg_value: // llvm.dbg.value
|
2015-06-27 06:04:34 +08:00
|
|
|
visitDbgIntrinsic("value", cast<DbgValueInst>(*CS.getInstruction()));
|
2015-03-15 08:50:57 +08:00
|
|
|
break;
|
2008-11-22 00:42:48 +08:00
|
|
|
case Intrinsic::memcpy:
|
|
|
|
case Intrinsic::memmove:
|
2015-03-02 17:35:06 +08:00
|
|
|
case Intrinsic::memset: {
|
2015-06-27 06:04:34 +08:00
|
|
|
ConstantInt *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3));
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(AlignCI,
|
|
|
|
"alignment argument of memory intrinsics must be a constant int",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2015-03-02 17:35:06 +08:00
|
|
|
const APInt &AlignVal = AlignCI->getValue();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(AlignCI->isZero() || AlignVal.isPowerOf2(),
|
2015-06-27 06:04:34 +08:00
|
|
|
"alignment argument of memory intrinsics must be a power of 2", CS);
|
2015-11-19 13:56:52 +08:00
|
|
|
Assert(isa<ConstantInt>(CS.getArgOperand(4)),
|
2015-03-08 05:15:40 +08:00
|
|
|
"isvolatile argument of memory intrinsics must be a constant int",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2008-08-23 13:31:10 +08:00
|
|
|
break;
|
2015-03-02 17:35:06 +08:00
|
|
|
}
|
2008-08-23 17:46:46 +08:00
|
|
|
case Intrinsic::gcroot:
|
|
|
|
case Intrinsic::gcwrite:
|
2008-08-25 04:46:13 +08:00
|
|
|
case Intrinsic::gcread:
|
|
|
|
if (ID == Intrinsic::gcroot) {
|
2008-10-26 00:28:35 +08:00
|
|
|
AllocaInst *AI =
|
2015-06-27 06:04:34 +08:00
|
|
|
dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
|
|
|
|
Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
|
|
|
|
Assert(isa<Constant>(CS.getArgOperand(1)),
|
|
|
|
"llvm.gcroot parameter #2 must be a constant.", CS);
|
2015-05-12 07:09:25 +08:00
|
|
|
if (!AI->getAllocatedType()->isPointerTy()) {
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
|
2015-03-08 05:15:40 +08:00
|
|
|
"llvm.gcroot parameter #1 must either be a pointer alloca, "
|
|
|
|
"or argument #2 must be a non-null constant.",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2010-10-01 04:23:47 +08:00
|
|
|
}
|
2008-08-25 04:46:13 +08:00
|
|
|
}
|
2009-09-08 04:44:51 +08:00
|
|
|
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(CS.getParent()->getParent()->hasGC(),
|
|
|
|
"Enclosing function does not use GC.", CS);
|
2008-08-25 04:46:13 +08:00
|
|
|
break;
|
2007-09-30 00:25:54 +08:00
|
|
|
case Intrinsic::init_trampoline:
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
|
2015-03-08 05:15:40 +08:00
|
|
|
"llvm.init_trampoline parameter #2 must resolve to a function.",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2007-12-25 10:02:10 +08:00
|
|
|
break;
|
2008-10-16 14:00:36 +08:00
|
|
|
case Intrinsic::prefetch:
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
|
|
|
|
isa<ConstantInt>(CS.getArgOperand(2)) &&
|
|
|
|
cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
|
|
|
|
cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
|
|
|
|
"invalid arguments to llvm.prefetch", CS);
|
2008-10-16 14:00:36 +08:00
|
|
|
break;
|
2008-11-19 07:09:31 +08:00
|
|
|
case Intrinsic::stackprotector:
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
|
|
|
|
"llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
|
2008-11-19 07:09:31 +08:00
|
|
|
break;
|
2009-10-13 15:57:33 +08:00
|
|
|
case Intrinsic::lifetime_start:
|
|
|
|
case Intrinsic::lifetime_end:
|
|
|
|
case Intrinsic::invariant_start:
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(isa<ConstantInt>(CS.getArgOperand(0)),
|
2015-03-08 05:15:40 +08:00
|
|
|
"size argument of memory use markers must be a constant integer",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2009-10-13 15:57:33 +08:00
|
|
|
break;
|
|
|
|
case Intrinsic::invariant_end:
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(isa<ConstantInt>(CS.getArgOperand(1)),
|
|
|
|
"llvm.invariant.end parameter #2 must be a constant integer", CS);
|
2009-10-13 15:57:33 +08:00
|
|
|
break;
|
2015-01-13 08:48:10 +08:00
|
|
|
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
case Intrinsic::localescape: {
|
2015-06-27 06:04:34 +08:00
|
|
|
BasicBlock *BB = CS.getParent();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(BB == &BB->getParent()->front(),
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
"llvm.localescape used outside of entry block", CS);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(!SawFrameEscape,
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
"multiple calls to llvm.localescape in one function", CS);
|
2015-06-27 06:04:34 +08:00
|
|
|
for (Value *Arg : CS.args()) {
|
2015-04-03 05:13:31 +08:00
|
|
|
if (isa<ConstantPointerNull>(Arg))
|
|
|
|
continue; // Null values are allowed as placeholders.
|
2015-03-06 02:26:34 +08:00
|
|
|
auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(AI && AI->isStaticAlloca(),
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
"llvm.localescape only accepts static allocas", CS);
|
2015-03-06 02:26:34 +08:00
|
|
|
}
|
2015-06-27 06:04:34 +08:00
|
|
|
FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
|
2015-03-06 02:26:34 +08:00
|
|
|
SawFrameEscape = true;
|
2015-01-13 08:48:10 +08:00
|
|
|
break;
|
|
|
|
}
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
case Intrinsic::localrecover: {
|
2015-06-27 06:04:34 +08:00
|
|
|
Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
|
2015-01-13 08:48:10 +08:00
|
|
|
Function *Fn = dyn_cast<Function>(FnArg);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(Fn && !Fn->isDeclaration(),
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
"llvm.localrecover first "
|
2015-03-08 05:15:40 +08:00
|
|
|
"argument must be function defined in this module",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
|
|
|
auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
|
Rename llvm.frameescape and llvm.framerecover to localescape and localrecover
Summary:
Initially, these intrinsics seemed like part of a family of "frame"
related intrinsics, but now I think that's more confusing than helpful.
Initially, the LangRef specified that this would create a new kind of
allocation that would be allocated at a fixed offset from the frame
pointer (EBP/RBP). We ended up dropping that design, and leaving the
stack frame layout alone.
These intrinsics are really about sharing local stack allocations, not
frame pointers. I intend to go further and add an `llvm.localaddress()`
intrinsic that returns whatever register (EBP, ESI, ESP, RBX) is being
used to address locals, which should not be confused with the frame
pointer.
Naming suggestions at this point are welcome, I'm happy to re-run sed.
Reviewers: majnemer, nicholas
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D11011
llvm-svn: 241633
2015-07-08 06:25:32 +08:00
|
|
|
Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2015-03-06 02:26:34 +08:00
|
|
|
auto &Entry = FrameEscapeInfo[Fn];
|
|
|
|
Entry.second = unsigned(
|
|
|
|
std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
|
2015-01-13 08:48:10 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-01-31 07:28:05 +08:00
|
|
|
case Intrinsic::experimental_gc_statepoint:
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(!CS.isInlineAsm(),
|
|
|
|
"gc.statepoint support for inline assembly unimplemented", CS);
|
|
|
|
Assert(CS.getParent()->getParent()->hasGC(),
|
|
|
|
"Enclosing function does not use GC.", CS);
|
2015-02-04 07:18:47 +08:00
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
verifyStatepoint(CS);
|
2014-12-02 05:18:12 +08:00
|
|
|
break;
|
2015-01-23 04:14:38 +08:00
|
|
|
case Intrinsic::experimental_gc_result: {
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(CS.getParent()->getParent()->hasGC(),
|
|
|
|
"Enclosing function does not use GC.", CS);
|
2014-12-02 05:18:12 +08:00
|
|
|
// Are we tied to a statepoint properly?
|
2015-06-27 06:04:34 +08:00
|
|
|
CallSite StatepointCS(CS.getArgOperand(0));
|
2015-01-08 06:48:01 +08:00
|
|
|
const Function *StatepointFn =
|
|
|
|
StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(StatepointFn && StatepointFn->isDeclaration() &&
|
|
|
|
StatepointFn->getIntrinsicID() ==
|
|
|
|
Intrinsic::experimental_gc_statepoint,
|
2015-06-27 06:04:34 +08:00
|
|
|
"gc.result operand #1 must be from a statepoint", CS,
|
|
|
|
CS.getArgOperand(0));
|
2014-12-04 03:53:15 +08:00
|
|
|
|
2014-12-04 06:23:24 +08:00
|
|
|
// Assert that result type matches wrapped callee.
|
2015-05-13 07:52:24 +08:00
|
|
|
const Value *Target = StatepointCS.getArgument(2);
|
2015-08-02 06:20:21 +08:00
|
|
|
auto *PT = cast<PointerType>(Target->getType());
|
|
|
|
auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(CS.getType() == TargetFuncType->getReturnType(),
|
|
|
|
"gc.result result type does not match wrapped callee", CS);
|
2014-12-02 05:18:12 +08:00
|
|
|
break;
|
2007-09-18 04:30:04 +08:00
|
|
|
}
|
2014-12-02 05:18:12 +08:00
|
|
|
case Intrinsic::experimental_gc_relocate: {
|
2015-06-27 06:04:34 +08:00
|
|
|
Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
|
2015-02-19 19:28:47 +08:00
|
|
|
|
2016-01-07 11:32:11 +08:00
|
|
|
Assert(isa<PointerType>(CS.getType()->getScalarType()),
|
|
|
|
"gc.relocate must return a pointer or a vector of pointers", CS);
|
|
|
|
|
2015-02-19 19:28:47 +08:00
|
|
|
// Check that this relocate is correctly tied to the statepoint
|
|
|
|
|
|
|
|
// This is case for relocate on the unwinding path of an invoke statepoint
|
2015-12-26 15:54:32 +08:00
|
|
|
if (LandingPadInst *LandingPad =
|
|
|
|
dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
|
2015-02-19 19:28:47 +08:00
|
|
|
|
2015-05-12 07:47:27 +08:00
|
|
|
const BasicBlock *InvokeBB =
|
2015-12-26 15:54:32 +08:00
|
|
|
LandingPad->getParent()->getUniquePredecessor();
|
2015-02-19 19:28:47 +08:00
|
|
|
|
|
|
|
// Landingpad relocates should have only one predecessor with invoke
|
|
|
|
// statepoint terminator
|
2015-05-12 07:47:27 +08:00
|
|
|
Assert(InvokeBB, "safepoints should have unique landingpads",
|
2015-12-26 15:54:32 +08:00
|
|
|
LandingPad->getParent());
|
2015-05-12 07:47:27 +08:00
|
|
|
Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
|
|
|
|
InvokeBB);
|
|
|
|
Assert(isStatepoint(InvokeBB->getTerminator()),
|
|
|
|
"gc relocate should be linked to a statepoint", InvokeBB);
|
2015-02-19 19:28:47 +08:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
// In all other cases relocate should be tied to the statepoint directly.
|
|
|
|
// This covers relocates on a normal return path of invoke statepoint and
|
2016-02-01 00:32:23 +08:00
|
|
|
// relocates of a call statepoint.
|
2015-06-27 06:04:34 +08:00
|
|
|
auto Token = CS.getArgOperand(0);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
|
2015-06-27 06:04:34 +08:00
|
|
|
"gc relocate is incorrectly tied to the statepoint", CS, Token);
|
2015-02-19 19:28:47 +08:00
|
|
|
}
|
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
// Verify rest of the relocate arguments.
|
2015-02-19 19:28:47 +08:00
|
|
|
|
2016-01-05 12:03:00 +08:00
|
|
|
ImmutableCallSite StatepointCS(
|
|
|
|
cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
|
2014-12-02 05:18:12 +08:00
|
|
|
|
2016-02-01 00:32:23 +08:00
|
|
|
// Both the base and derived must be piped through the safepoint.
|
2015-06-27 06:04:34 +08:00
|
|
|
Value* Base = CS.getArgOperand(1);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(isa<ConstantInt>(Base),
|
2015-06-27 06:04:34 +08:00
|
|
|
"gc.relocate operand #2 must be integer offset", CS);
|
2015-03-08 05:15:40 +08:00
|
|
|
|
2015-06-27 06:04:34 +08:00
|
|
|
Value* Derived = CS.getArgOperand(2);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(isa<ConstantInt>(Derived),
|
2015-06-27 06:04:34 +08:00
|
|
|
"gc.relocate operand #3 must be integer offset", CS);
|
2014-12-02 05:18:12 +08:00
|
|
|
|
|
|
|
const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
|
|
|
|
const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
|
|
|
|
// Check the bounds
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
|
2015-06-27 06:04:34 +08:00
|
|
|
"gc.relocate: statepoint base index out of bounds", CS);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
|
2015-06-27 06:04:34 +08:00
|
|
|
"gc.relocate: statepoint derived index out of bounds", CS);
|
2015-01-08 06:48:01 +08:00
|
|
|
|
|
|
|
// Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
|
2016-02-01 00:32:23 +08:00
|
|
|
// section of the statepoint's argument.
|
2015-03-10 13:58:21 +08:00
|
|
|
Assert(StatepointCS.arg_size() > 0,
|
|
|
|
"gc.statepoint: insufficient arguments");
|
2015-05-13 07:52:24 +08:00
|
|
|
Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
|
2015-03-11 14:57:30 +08:00
|
|
|
"gc.statement: number of call arguments must be constant integer");
|
2015-03-10 13:58:21 +08:00
|
|
|
const unsigned NumCallArgs =
|
2015-05-13 07:52:24 +08:00
|
|
|
cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
|
|
|
|
Assert(StatepointCS.arg_size() > NumCallArgs + 5,
|
2015-03-10 13:58:21 +08:00
|
|
|
"gc.statepoint: mismatch in number of call arguments");
|
2015-05-13 07:52:24 +08:00
|
|
|
Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-09 02:07:42 +08:00
|
|
|
"gc.statepoint: number of transition arguments must be "
|
|
|
|
"a constant integer");
|
|
|
|
const int NumTransitionArgs =
|
2015-05-13 07:52:24 +08:00
|
|
|
cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
|
|
|
|
->getZExtValue();
|
|
|
|
const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-09 02:07:42 +08:00
|
|
|
Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
|
2015-03-11 14:57:30 +08:00
|
|
|
"gc.statepoint: number of deoptimization arguments must be "
|
|
|
|
"a constant integer");
|
2015-01-08 06:48:01 +08:00
|
|
|
const int NumDeoptArgs =
|
2016-02-01 00:32:23 +08:00
|
|
|
cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))
|
|
|
|
->getZExtValue();
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-09 02:07:42 +08:00
|
|
|
const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
|
2015-01-08 06:48:01 +08:00
|
|
|
const int GCParamArgsEnd = StatepointCS.arg_size();
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
|
|
|
|
"gc.relocate: statepoint base index doesn't fall within the "
|
|
|
|
"'gc parameters' section of the statepoint call",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2015-03-08 05:15:40 +08:00
|
|
|
Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
|
|
|
|
"gc.relocate: statepoint derived index doesn't fall within the "
|
|
|
|
"'gc parameters' section of the statepoint call",
|
2015-06-27 06:04:34 +08:00
|
|
|
CS);
|
2014-12-04 03:53:15 +08:00
|
|
|
|
2016-01-07 11:32:11 +08:00
|
|
|
// Relocated value must be either a pointer type or vector-of-pointer type,
|
|
|
|
// but gc_relocate does not need to return the same pointer type as the
|
|
|
|
// relocated pointer. It can be casted to the correct type later if it's
|
|
|
|
// desired. However, they must have the same address space and 'vectorness'
|
2016-01-05 12:03:00 +08:00
|
|
|
GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
|
2016-01-07 11:32:11 +08:00
|
|
|
Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(),
|
2015-06-27 06:04:34 +08:00
|
|
|
"gc.relocate: relocated value must be a gc pointer", CS);
|
2015-05-19 03:50:14 +08:00
|
|
|
|
2016-01-07 11:32:11 +08:00
|
|
|
auto ResultType = CS.getType();
|
|
|
|
auto DerivedType = Relocate.getDerivedPtr()->getType();
|
|
|
|
Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
|
2016-02-01 00:32:23 +08:00
|
|
|
"gc.relocate: vector relocates to vector and pointer to pointer",
|
|
|
|
CS);
|
|
|
|
Assert(
|
|
|
|
ResultType->getPointerAddressSpace() ==
|
|
|
|
DerivedType->getPointerAddressSpace(),
|
|
|
|
"gc.relocate: relocating a pointer shouldn't change its address space",
|
|
|
|
CS);
|
2014-12-02 05:18:12 +08:00
|
|
|
break;
|
|
|
|
}
|
2015-10-07 08:27:33 +08:00
|
|
|
case Intrinsic::eh_exceptioncode:
|
2015-09-03 17:15:32 +08:00
|
|
|
case Intrinsic::eh_exceptionpointer: {
|
|
|
|
Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
|
|
|
|
"eh.exceptionpointer argument must be a catchpad", CS);
|
|
|
|
break;
|
|
|
|
}
|
2016-02-10 05:43:12 +08:00
|
|
|
case Intrinsic::masked_load: {
|
|
|
|
Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
|
|
|
|
|
|
|
|
Value *Ptr = CS.getArgOperand(0);
|
|
|
|
//Value *Alignment = CS.getArgOperand(1);
|
|
|
|
Value *Mask = CS.getArgOperand(2);
|
|
|
|
Value *PassThru = CS.getArgOperand(3);
|
|
|
|
Assert(Mask->getType()->isVectorTy(),
|
|
|
|
"masked_load: mask must be vector", CS);
|
|
|
|
|
|
|
|
// DataTy is the overloaded type
|
|
|
|
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
|
|
|
|
Assert(DataTy == CS.getType(),
|
|
|
|
"masked_load: return must match pointer type", CS);
|
|
|
|
Assert(PassThru->getType() == DataTy,
|
|
|
|
"masked_load: pass through and data type must match", CS);
|
|
|
|
Assert(Mask->getType()->getVectorNumElements() ==
|
|
|
|
DataTy->getVectorNumElements(),
|
|
|
|
"masked_load: vector mask must be same length as data", CS);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case Intrinsic::masked_store: {
|
|
|
|
Value *Val = CS.getArgOperand(0);
|
|
|
|
Value *Ptr = CS.getArgOperand(1);
|
|
|
|
//Value *Alignment = CS.getArgOperand(2);
|
|
|
|
Value *Mask = CS.getArgOperand(3);
|
|
|
|
Assert(Mask->getType()->isVectorTy(),
|
|
|
|
"masked_store: mask must be vector", CS);
|
|
|
|
|
|
|
|
// DataTy is the overloaded type
|
|
|
|
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
|
|
|
|
Assert(DataTy == Val->getType(),
|
|
|
|
"masked_store: storee must match pointer type", CS);
|
|
|
|
Assert(Mask->getType()->getVectorNumElements() ==
|
|
|
|
DataTy->getVectorNumElements(),
|
|
|
|
"masked_store: vector mask must be same length as data", CS);
|
|
|
|
break;
|
|
|
|
}
|
Introduce @llvm.experimental.deoptimize
Summary:
This intrinsic, together with deoptimization operand bundles, allow
frontends to express transfer of control and frame-local state from
one (typically more specialized, hence faster) version of a function
into another (typically more generic, hence slower) version.
In languages with a fully integrated managed runtime this intrinsic can
be used to implement "uncommon trap" like functionality. In unmanaged
languages like C and C++, this intrinsic can be used to represent the
slow paths of specialized functions.
Note: this change does not address how `@llvm.experimental.deoptimize`
is lowered. That will be done in a later change.
Reviewers: chandlerc, rnk, atrick, reames
Subscribers: llvm-commits, kmod, mjacob, maksfb, mcrosier, JosephTremoulet
Differential Revision: http://reviews.llvm.org/D17732
llvm-svn: 263281
2016-03-12 03:08:34 +08:00
|
|
|
|
2016-03-31 08:18:46 +08:00
|
|
|
case Intrinsic::experimental_guard: {
|
|
|
|
Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
|
|
|
|
Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
|
|
|
|
"experimental_guard must have exactly one "
|
|
|
|
"\"deopt\" operand bundle");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
Introduce @llvm.experimental.deoptimize
Summary:
This intrinsic, together with deoptimization operand bundles, allow
frontends to express transfer of control and frame-local state from
one (typically more specialized, hence faster) version of a function
into another (typically more generic, hence slower) version.
In languages with a fully integrated managed runtime this intrinsic can
be used to implement "uncommon trap" like functionality. In unmanaged
languages like C and C++, this intrinsic can be used to represent the
slow paths of specialized functions.
Note: this change does not address how `@llvm.experimental.deoptimize`
is lowered. That will be done in a later change.
Reviewers: chandlerc, rnk, atrick, reames
Subscribers: llvm-commits, kmod, mjacob, maksfb, mcrosier, JosephTremoulet
Differential Revision: http://reviews.llvm.org/D17732
llvm-svn: 263281
2016-03-12 03:08:34 +08:00
|
|
|
case Intrinsic::experimental_deoptimize: {
|
|
|
|
Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
|
|
|
|
Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
|
|
|
|
"experimental_deoptimize must have exactly one "
|
|
|
|
"\"deopt\" operand bundle");
|
|
|
|
Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(),
|
|
|
|
"experimental_deoptimize return type must match caller return type");
|
|
|
|
|
|
|
|
if (CS.isCall()) {
|
|
|
|
auto *DeoptCI = CS.getInstruction();
|
|
|
|
auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode());
|
|
|
|
Assert(RI,
|
|
|
|
"calls to experimental_deoptimize must be followed by a return");
|
|
|
|
|
|
|
|
if (!CS.getType()->isVoidTy() && RI)
|
|
|
|
Assert(RI->getReturnValue() == DeoptCI,
|
|
|
|
"calls to experimental_deoptimize must be followed by a return "
|
|
|
|
"of the value computed by experimental_deoptimize");
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
2014-12-02 05:18:12 +08:00
|
|
|
};
|
2002-04-19 04:37:37 +08:00
|
|
|
}
|
|
|
|
|
2015-04-16 06:15:46 +08:00
|
|
|
/// \brief Carefully grab the subprogram from a local scope.
|
|
|
|
///
|
|
|
|
/// This carefully grabs the subprogram from a local scope, avoiding the
|
|
|
|
/// built-in assertions that would typically fire.
|
2015-04-30 00:38:44 +08:00
|
|
|
static DISubprogram *getSubprogram(Metadata *LocalScope) {
|
2015-04-16 06:15:46 +08:00
|
|
|
if (!LocalScope)
|
|
|
|
return nullptr;
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
|
2015-04-16 06:15:46 +08:00
|
|
|
return SP;
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
|
2015-04-16 06:15:46 +08:00
|
|
|
return getSubprogram(LB->getRawScope());
|
|
|
|
|
|
|
|
// Just return null; broken scope chains are checked elsewhere.
|
2015-04-30 00:38:44 +08:00
|
|
|
assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
|
2015-04-16 06:15:46 +08:00
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
2015-03-15 09:21:30 +08:00
|
|
|
template <class DbgIntrinsicTy>
|
|
|
|
void Verifier::visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII) {
|
|
|
|
auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
|
|
|
|
Assert(isa<ValueAsMetadata>(MD) ||
|
|
|
|
(isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
|
|
|
|
"invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DILocalVariable>(DII.getRawVariable()),
|
2015-03-15 09:21:30 +08:00
|
|
|
"invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
|
|
|
|
DII.getRawVariable());
|
2015-04-30 00:38:44 +08:00
|
|
|
Assert(isa<DIExpression>(DII.getRawExpression()),
|
2015-03-15 09:21:30 +08:00
|
|
|
"invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
|
|
|
|
DII.getRawExpression());
|
Verifier: Check that inlined-at locations agree
Check that the `MDLocalVariable::getInlinedAt()` in a debug info
intrinsic's variable always matches the `MDLocation::getInlinedAt()` of
its `!dbg` attachment.
The goal here is to get rid of `MDLocalVariable::getInlinedAt()`
entirely (PR22778), since it's expensive and unnecessary, but I'll let
this verifier check bake for a while (a week maybe?) first. I've
updated the testcases that had the wrong value for `inlinedAt:`.
This checks that things are sane in the IR, but currently things go out
of whack in a few places in the backend. I'll follow shortly with
assertions in the backend (with code fixes).
If you have out-of-tree testcases that just started failing, here's how
I updated these ones:
1. The verifier check gives you the basic block, function, instruction,
and relevant metadata arguments (metadata numbering doesn't
necessarily match the source file, unfortunately).
2. Look at the `@llvm.dbg.*()` instruction, and compare the
`inlinedAt:` fields of the variable argument (second `metadata`
argument) and the `!dbg` attachment.
3. Figure out based on the variable `scope:` chain and the functions in
the file whether the variable has been inlined (and into what), so
you can determine which `inlinedAt:` is actually correct. In all of
the in-tree testcases, the `!MDLocation()` was correct and the
`!MDLocalVariable()` was wrong, but YMMV.
4. Duplicate the metadata that you're going to change, and add/drop the
`inlinedAt:` field from one of them. Be careful that the other
references to the same metadata node point at the correct one.
llvm-svn: 234021
2015-04-04 00:54:30 +08:00
|
|
|
|
|
|
|
// Ignore broken !dbg attachments; they're checked elsewhere.
|
|
|
|
if (MDNode *N = DII.getDebugLoc().getAsMDNode())
|
2015-04-30 00:38:44 +08:00
|
|
|
if (!isa<DILocation>(N))
|
Verifier: Check that inlined-at locations agree
Check that the `MDLocalVariable::getInlinedAt()` in a debug info
intrinsic's variable always matches the `MDLocation::getInlinedAt()` of
its `!dbg` attachment.
The goal here is to get rid of `MDLocalVariable::getInlinedAt()`
entirely (PR22778), since it's expensive and unnecessary, but I'll let
this verifier check bake for a while (a week maybe?) first. I've
updated the testcases that had the wrong value for `inlinedAt:`.
This checks that things are sane in the IR, but currently things go out
of whack in a few places in the backend. I'll follow shortly with
assertions in the backend (with code fixes).
If you have out-of-tree testcases that just started failing, here's how
I updated these ones:
1. The verifier check gives you the basic block, function, instruction,
and relevant metadata arguments (metadata numbering doesn't
necessarily match the source file, unfortunately).
2. Look at the `@llvm.dbg.*()` instruction, and compare the
`inlinedAt:` fields of the variable argument (second `metadata`
argument) and the `!dbg` attachment.
3. Figure out based on the variable `scope:` chain and the functions in
the file whether the variable has been inlined (and into what), so
you can determine which `inlinedAt:` is actually correct. In all of
the in-tree testcases, the `!MDLocation()` was correct and the
`!MDLocalVariable()` was wrong, but YMMV.
4. Duplicate the metadata that you're going to change, and add/drop the
`inlinedAt:` field from one of them. Be careful that the other
references to the same metadata node point at the correct one.
llvm-svn: 234021
2015-04-04 00:54:30 +08:00
|
|
|
return;
|
|
|
|
|
2015-04-16 06:15:46 +08:00
|
|
|
BasicBlock *BB = DII.getParent();
|
|
|
|
Function *F = BB ? BB->getParent() : nullptr;
|
|
|
|
|
2015-04-16 06:29:27 +08:00
|
|
|
// The scopes for variables and !dbg attachments must agree.
|
2015-04-30 00:38:44 +08:00
|
|
|
DILocalVariable *Var = DII.getVariable();
|
|
|
|
DILocation *Loc = DII.getDebugLoc();
|
2015-04-16 06:15:46 +08:00
|
|
|
Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
|
|
|
|
&DII, BB, F);
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
DISubprogram *VarSP = getSubprogram(Var->getRawScope());
|
|
|
|
DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
|
2015-04-16 06:15:46 +08:00
|
|
|
if (!VarSP || !LocSP)
|
|
|
|
return; // Broken scope chains are checked elsewhere.
|
|
|
|
|
|
|
|
Assert(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
|
|
|
|
" variable and !dbg attachment",
|
|
|
|
&DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
|
|
|
|
Loc->getScope()->getSubprogram());
|
2015-03-15 09:21:30 +08:00
|
|
|
}
|
|
|
|
|
2016-04-24 05:08:00 +08:00
|
|
|
static uint64_t getVariableSize(const DILocalVariable &V) {
|
2015-04-14 02:53:11 +08:00
|
|
|
// Be careful of broken types (checked elsewhere).
|
|
|
|
const Metadata *RawType = V.getRawType();
|
|
|
|
while (RawType) {
|
|
|
|
// Try to get the size directly.
|
2015-04-30 00:38:44 +08:00
|
|
|
if (auto *T = dyn_cast<DIType>(RawType))
|
2015-04-14 02:53:11 +08:00
|
|
|
if (uint64_t Size = T->getSizeInBits())
|
|
|
|
return Size;
|
|
|
|
|
2015-04-30 00:38:44 +08:00
|
|
|
if (auto *DT = dyn_cast<DIDerivedType>(RawType)) {
|
2015-04-14 02:53:11 +08:00
|
|
|
// Look at the base type.
|
|
|
|
RawType = DT->getRawBaseType();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Missing type or size.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fail gracefully.
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-04-24 05:08:00 +08:00
|
|
|
void Verifier::verifyBitPieceExpression(const DbgInfoIntrinsic &I) {
|
2015-04-30 00:38:44 +08:00
|
|
|
DILocalVariable *V;
|
|
|
|
DIExpression *E;
|
2015-04-14 02:53:11 +08:00
|
|
|
if (auto *DVI = dyn_cast<DbgValueInst>(&I)) {
|
2015-04-30 00:38:44 +08:00
|
|
|
V = dyn_cast_or_null<DILocalVariable>(DVI->getRawVariable());
|
|
|
|
E = dyn_cast_or_null<DIExpression>(DVI->getRawExpression());
|
2015-04-14 02:53:11 +08:00
|
|
|
} else {
|
|
|
|
auto *DDI = cast<DbgDeclareInst>(&I);
|
2015-04-30 00:38:44 +08:00
|
|
|
V = dyn_cast_or_null<DILocalVariable>(DDI->getRawVariable());
|
|
|
|
E = dyn_cast_or_null<DIExpression>(DDI->getRawExpression());
|
2015-04-14 02:53:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// We don't know whether this intrinsic verified correctly.
|
|
|
|
if (!V || !E || !E->isValid())
|
|
|
|
return;
|
|
|
|
|
2016-01-15 10:12:38 +08:00
|
|
|
// Nothing to do if this isn't a bit piece expression.
|
|
|
|
if (!E->isBitPiece())
|
|
|
|
return;
|
|
|
|
|
2015-04-30 00:52:17 +08:00
|
|
|
// The frontend helps out GDB by emitting the members of local anonymous
|
|
|
|
// unions as artificial local variables with shared storage. When SROA splits
|
|
|
|
// the storage for artificial local variables that are smaller than the entire
|
|
|
|
// union, the overhang piece will be outside of the allotted space for the
|
|
|
|
// variable and this check fails.
|
|
|
|
// FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
|
|
|
|
if (V->isArtificial())
|
|
|
|
return;
|
|
|
|
|
2015-04-14 02:53:11 +08:00
|
|
|
// If there's no size, the type is broken, but that should be checked
|
|
|
|
// elsewhere.
|
2016-04-24 05:08:00 +08:00
|
|
|
uint64_t VarSize = getVariableSize(*V);
|
2015-04-14 02:53:11 +08:00
|
|
|
if (!VarSize)
|
|
|
|
return;
|
|
|
|
|
2016-01-15 10:12:38 +08:00
|
|
|
unsigned PieceSize = E->getBitPieceSize();
|
|
|
|
unsigned PieceOffset = E->getBitPieceOffset();
|
|
|
|
Assert(PieceSize + PieceOffset <= VarSize,
|
|
|
|
"piece is larger than or outside of variable", &I, V, E);
|
|
|
|
Assert(PieceSize != VarSize, "piece covers entire variable", &I, V, E);
|
2015-04-14 02:53:11 +08:00
|
|
|
}
|
|
|
|
|
2016-03-29 05:06:26 +08:00
|
|
|
void Verifier::verifyCompileUnits() {
|
|
|
|
auto *CUs = M->getNamedMetadata("llvm.dbg.cu");
|
|
|
|
SmallPtrSet<const Metadata *, 2> Listed;
|
|
|
|
if (CUs)
|
|
|
|
Listed.insert(CUs->op_begin(), CUs->op_end());
|
|
|
|
Assert(
|
|
|
|
std::all_of(CUVisited.begin(), CUVisited.end(),
|
|
|
|
[&Listed](const Metadata *CU) { return Listed.count(CU); }),
|
|
|
|
"All DICompileUnits must be listed in llvm.dbg.cu");
|
|
|
|
CUVisited.clear();
|
|
|
|
}
|
|
|
|
|
2002-04-19 04:37:37 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Implement the public interfaces to this file...
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
|
2014-01-17 19:09:34 +08:00
|
|
|
Function &F = const_cast<Function &>(f);
|
2007-01-31 04:08:39 +08:00
|
|
|
assert(!F.isDeclaration() && "Cannot verify external functions");
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2016-04-21 00:17:37 +08:00
|
|
|
// Don't use a raw_null_ostream. Printing IR is expensive.
|
|
|
|
Verifier V(OS);
|
2014-01-19 10:22:18 +08:00
|
|
|
|
|
|
|
// Note that this function's return value is inverted from what you would
|
|
|
|
// expect of a function called "verify".
|
|
|
|
return !V.verify(F);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool llvm::verifyModule(const Module &M, raw_ostream *OS) {
|
2016-04-21 00:17:37 +08:00
|
|
|
// Don't use a raw_null_ostream. Printing IR is expensive.
|
|
|
|
Verifier V(OS);
|
2014-01-19 10:22:18 +08:00
|
|
|
|
|
|
|
bool Broken = false;
|
2016-02-26 00:44:27 +08:00
|
|
|
for (const Function &F : M)
|
|
|
|
if (!F.isDeclaration() && !F.isMaterializable())
|
|
|
|
Broken |= !V.verify(F);
|
2014-01-19 10:22:18 +08:00
|
|
|
|
|
|
|
// Note that this function's return value is inverted from what you would
|
|
|
|
// expect of a function called "verify".
|
Verifier: Remove the separate DebugInfoVerifier class
Remove the separate `DebugInfoVerifier` class, as a partial step toward
better integrating debug info verification with the `Verifier`.
Right now, verification of debug info is kind of a mess.
- There are `DIDescriptor::Verify()` checks live in `DebugInfo.cpp`.
These return `bool`, and there's no way to see (except by opening a
debugger) why they fail.
- We rely on `DebugInfoFinder` to traverse the debug info graph and
dig up nodes. However, the regular `Verifier` visits many of these
nodes when it calls into debug info intrinsic operands. Visiting
twice and running different checks is kind of absurd.
- Moreover, `DebugInfoFinder` asserts on failed type resolution -- the
verifier should never assert!
By integrating the two verifiers, I'm aiming at solving these problems
(work to be done, obviously). Verification can be localized to the
`Verifier`; we can use a naive `MDNode` operand traversal to find all
the nodes; we can verify type references instead of asserting on
failure.
There are `assert()`s sprinkled throughout the optimizer and dwarf
backend on `DIDescriptor::Verify()` checks. This is a hangover from
when the debug info verifier was off, so I plan to remove them as I go
(once I confirm that the checks are done at verification time).
Note: to keep the behaviour of only running the debug info verifier when
-verify succeeds, I've added an `EverBroken` flag. Once the
`DebugInfoFinder` assertions are gone and the two traversals have been
merged, I expect to be able to remove this.
llvm-svn: 232790
2015-03-20 08:48:23 +08:00
|
|
|
return !V.verify(M) || Broken;
|
2014-01-19 10:22:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
2014-01-20 19:34:08 +08:00
|
|
|
struct VerifierLegacyPass : public FunctionPass {
|
2014-01-19 10:22:18 +08:00
|
|
|
static char ID;
|
|
|
|
|
|
|
|
Verifier V;
|
|
|
|
bool FatalErrors;
|
|
|
|
|
2016-04-21 00:17:37 +08:00
|
|
|
VerifierLegacyPass() : FunctionPass(ID), V(&dbgs()), FatalErrors(true) {
|
2014-01-20 19:34:08 +08:00
|
|
|
initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
|
2014-01-19 10:22:18 +08:00
|
|
|
}
|
2014-01-20 19:34:08 +08:00
|
|
|
explicit VerifierLegacyPass(bool FatalErrors)
|
2016-04-21 00:17:37 +08:00
|
|
|
: FunctionPass(ID), V(&dbgs()), FatalErrors(FatalErrors) {
|
2014-01-20 19:34:08 +08:00
|
|
|
initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
|
2014-01-19 10:22:18 +08:00
|
|
|
}
|
|
|
|
|
2014-03-05 14:35:38 +08:00
|
|
|
bool runOnFunction(Function &F) override {
|
2014-01-19 10:22:18 +08:00
|
|
|
if (!V.verify(F) && FatalErrors)
|
|
|
|
report_fatal_error("Broken function found, compilation aborted!");
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-03-05 14:35:38 +08:00
|
|
|
bool doFinalization(Module &M) override {
|
2014-01-19 10:22:18 +08:00
|
|
|
if (!V.verify(M) && FatalErrors)
|
|
|
|
report_fatal_error("Broken module found, compilation aborted!");
|
|
|
|
|
2014-04-16 00:27:38 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
|
|
|
AU.setPreservesAll();
|
|
|
|
}
|
|
|
|
};
|
2015-06-23 17:49:53 +08:00
|
|
|
}
|
2002-02-21 01:55:43 +08:00
|
|
|
|
2014-01-20 19:34:08 +08:00
|
|
|
// Pass identification, replacement for typeid.
char VerifierLegacyPass::ID = 0;
// Register the pass under the "verify" command-line name.
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
|
2009-09-08 04:44:51 +08:00
|
|
|
|
2014-01-19 10:22:18 +08:00
|
|
|
/// Create a verifier pass for the legacy pass manager (which takes ownership
/// of the returned pass). \p FatalErrors makes broken IR abort compilation.
FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
  return new VerifierLegacyPass(FatalErrors);
}
|
2014-01-19 10:22:18 +08:00
|
|
|
|
2015-01-05 10:47:05 +08:00
|
|
|
PreservedAnalyses VerifierPass::run(Module &M) {
  // Run the verifier unconditionally so diagnostics reach dbgs() even when
  // errors are not fatal.
  bool BrokenModule = verifyModule(M, &dbgs());
  if (BrokenModule && FatalErrors)
    report_fatal_error("Broken module found, compilation aborted!");

  return PreservedAnalyses::all();
}
|
|
|
|
|
2015-01-05 10:47:05 +08:00
|
|
|
PreservedAnalyses VerifierPass::run(Function &F) {
  // Run the verifier unconditionally so diagnostics reach dbgs() even when
  // errors are not fatal.
  bool BrokenFunction = verifyFunction(F, &dbgs());
  if (BrokenFunction && FatalErrors)
    report_fatal_error("Broken function found, compilation aborted!");

  return PreservedAnalyses::all();
}
|