//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

using namespace llvm;

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
  // If requested, insert this instruction into a basic block.
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
  // Append this instruction to the end of the given basic block.
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}

Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");
  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));

  if (hasMetadataHashEntry())
    clearMetadataHashEntries();
}
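
// Illustrative sketch (not from the original file): once a deleted
// instruction's metadata uses are RAUW'd to undef, a dbg.value that referred
// to it would read, e.g.:
//   call void @llvm.dbg.value(metadata i32 undef, ...)
// instead of silently keeping a stale location.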

void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}
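
// Illustrative sketch (not from the original file): eraseFromParent() returns
// the iterator following the erased instruction, so callers can erase while
// walking a block (isInstructionTriviallyDead is from Transforms/Utils/Local.h):
//   for (auto It = BB->begin(), E = BB->end(); It != E;)
//     It = isInstructionTriviallyDead(&*It) ? It->eraseFromParent()
//                                           : std::next(It);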

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}

void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}
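
// Illustrative sketch (not from the original file): the move* methods splice
// an instruction to a new position without touching its operands or users,
// e.g. hoisting a loop-invariant computation to a (hypothetical) preheader:
//   Inv->moveBefore(Preheader->getTerminator());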

bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
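
// Illustrative sketch (not from the original file): comesBefore() gives
// amortized O(1) intra-block ordering; only the first query after the block
// is modified pays for a renumbering pass. Assuming Def and Use are
// instructions in the same block:
//   bool DefIsEarlier = Def->comesBefore(Use);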

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  // TODO: FastMathFlags!
}
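
// Illustrative sketch (not from the original file): a transform that makes an
// instruction execute on more paths than it originally did (e.g. hoisting out
// of a conditional) must drop nuw/nsw/exact/inbounds first, since those flags
// were only known to hold on the original path:
//   I->dropPoisonGeneratingFlags();
//   I->moveBefore(HoistPt); // HoistPt is a hypothetical destination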

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
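
// Illustrative sketch (not from the original file): fast-math flags travel as
// a bundle, so a pass replacing one FP operation with another typically does
// (OldI/NewI are hypothetical):
//   NewI->copyFastMathFlags(OldI->getFastMathFlags());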

void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() | DestGEP->isInBounds());
}

void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() & OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() & OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() & PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() & DestGEP->isInBounds());
}
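
// Illustrative sketch (not from the original file): note the asymmetry above.
// copyIRFlags() unions the GEP inbounds bit, while andIRFlags() intersects
// every flag, which is what a merge of two instructions requires: only flags
// valid on *both* may survive (Kept/Discarded are hypothetical):
//   Kept->andIRFlags(Discarded);
//   Discarded->replaceAllUsesWith(Kept);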

const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret: return "ret";
  case Br: return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca: return "alloca";
  case Load: return "load";
  case Store: return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW: return "atomicrmw";
  case Fence: return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc: return "trunc";
  case ZExt: return "zext";
  case SExt: return "sext";
  case FPTrunc: return "fptrunc";
  case FPExt: return "fpext";
  case FPToUI: return "fptoui";
  case FPToSI: return "fptosi";
  case UIToFP: return "uitofp";
  case SIToFP: return "sitofp";
  case IntToPtr: return "inttoptr";
  case PtrToInt: return "ptrtoint";
  case BitCast: return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp: return "icmp";
  case FCmp: return "fcmp";
  case PHI: return "phi";
  case Select: return "select";
  case Call: return "call";
  case Shl: return "shl";
  case LShr: return "lshr";
  case AShr: return "ashr";
  case VAArg: return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement: return "insertelement";
  case ShuffleVector: return "shufflevector";
  case ExtractValue: return "extractvalue";
  case InsertValue: return "insertvalue";
  case LandingPad: return "landingpad";
  case CleanupPad: return "cleanuppad";
  case Freeze: return "freeze";

  default: return "<Invalid operator> ";
  }
}
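
// Illustrative sketch (not from the original file): the returned name is the
// keyword used in textual IR, which is convenient for diagnostics:
//   errs() << "visiting " << I->getOpcodeName() << "\n"; // e.g. "phi"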

/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();

  return true;
}

bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // PHI nodes are special.
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    // PHI nodes don't necessarily have their operands in the same order,
    // so we shouldn't just compare ranges of incoming blocks/values.

    // If both PHIs are in the same basic block -- the most interesting case --
    // they must have identical predecessor lists, so we only need to check
    // the incoming values.
    if (thisPHI->getParent() == otherPHI->getParent()) {
      return all_of(thisPHI->blocks(), [thisPHI, otherPHI](BasicBlock *PredBB) {
        return thisPHI->getIncomingValueForBlock(PredBB) ==
               otherPHI->getIncomingValueForBlock(PredBB);
      });
    }

    // Otherwise, naively compare operands/blocks pairwise.
    return std::equal(op_begin(), op_end(), I->op_begin()) &&
           std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }
|
|
|
// We have two instructions of identical opcode and #operands. Check to see
|
|
|
|
// if all operands are the same.
|
|
|
|
if (!std::equal(op_begin(), op_end(), I->op_begin()))
|
|
|
|
return false;
|
|
|
|
|
2014-05-28 05:35:46 +08:00
|
|
|
return haveSameSpecialState(this, I);
|
2006-12-23 14:05:41 +08:00
|
|
|
}
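
// Illustrative example (an addition, not upstream code): the per-predecessor
// comparison above deliberately ignores operand order, so two PHIs such as
//
//   %a = phi i32 [ 0, %bb1 ], [ 1, %bb2 ]
//   %b = phi i32 [ 1, %bb2 ], [ 0, %bb1 ]
//
// compare as identical even though a positional std::equal over their
// operand lists would not.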

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
           getType()->getScalarType() != I->getType()->getScalarType() :
           getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are of the same type.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
            getOperand(i)->getType()->getScalarType() !=
                I->getOperand(i)->getType()->getScalarType() :
            getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}
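
// Illustrative sketch (an addition; `A` and `B` are hypothetical
// Instruction pointers): a client willing to merge loads that differ only
// in alignment could query
//
//   if (A->isSameOperationAs(B, Instruction::CompareIgnoringAlignment))
//     ; // A and B perform the same operation modulo alignment.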

bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in their corresponding predecessor block. For
    // other instructions, just check to see whether the parent of the use
    // matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}
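
// Illustrative sketch (an addition; `I` and `BB` are hypothetical): a pass
// deciding whether a value can stay local to its block might check
//
//   if (!I->isUsedOutsideOfBlock(&BB))
//     ; // every use, including PHI incoming values, is attributed to BB.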

bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->doesNotReadMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}
|
2008-04-21 06:11:30 +08:00
|
|
|
|
2007-02-16 07:15:00 +08:00
|
|
|
bool Instruction::mayWriteToMemory() const {
|
|
|
|
switch (getOpcode()) {
|
|
|
|
default: return false;
|
2011-07-27 09:08:30 +08:00
|
|
|
case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
|
2007-12-04 04:06:50 +08:00
|
|
|
case Instruction::Store:
|
2007-02-16 07:15:00 +08:00
|
|
|
case Instruction::VAArg:
|
2011-07-29 11:05:32 +08:00
|
|
|
case Instruction::AtomicCmpXchg:
|
|
|
|
case Instruction::AtomicRMW:
|
2015-09-11 02:50:09 +08:00
|
|
|
case Instruction::CatchPad:
|
2015-08-01 01:58:14 +08:00
|
|
|
case Instruction::CatchRet:
|
2007-02-16 07:15:00 +08:00
|
|
|
return true;
|
|
|
|
case Instruction::Call:
|
2008-05-09 01:16:51 +08:00
|
|
|
case Instruction::Invoke:
|
2019-02-09 04:48:56 +08:00
|
|
|
case Instruction::CallBr:
|
2019-01-30 07:31:54 +08:00
|
|
|
return !cast<CallBase>(this)->onlyReadsMemory();
|
2007-02-16 07:15:00 +08:00
|
|
|
case Instruction::Load:
|
2011-08-16 05:00:18 +08:00
|
|
|
return !cast<LoadInst>(this)->isUnordered();
|
2007-02-16 07:15:00 +08:00
|
|
|
}
|
|
|
|
}
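
// Illustrative sketch (an addition; `A` and `B` are hypothetical): the two
// queries above compose into a conservative "may these memory effects
// conflict" test, e.g. before swapping two adjacent instructions:
//
//   bool MayConflict = (A->mayWriteToMemory() && B->mayReadFromMemory()) ||
//                      (A->mayReadFromMemory() && B->mayWriteToMemory()) ||
//                      (A->mayWriteToMemory() && B->mayWriteToMemory());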

bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}
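
// Illustrative note (an addition): hasAtomicLoad()/hasAtomicStore() assert
// isAtomic(), so callers must guard the queries accordingly, e.g.
//
//   if (I->isAtomic() && I->hasAtomicStore())
//     ; // I is a cmpxchg, an atomicrmw, or an atomic store.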

bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}
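
// Illustrative note (an addition): an invoke matches none of the cases
// above, because its potential throw is modeled by the unwind edge instead;
// only `call` instructions report it directly, e.g.
//
//   %r = call i32 @f() nounwind   ; mayThrow() == false
//   %s = call i32 @g()            ; mayThrow() == true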

bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator();
}

bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}
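
// Illustrative sketch (an addition; `BB` is a hypothetical BasicBlock, and
// llvm::count_if is the STLExtras helper): analyses frequently skip lifetime
// markers when counting "real" work in a block, e.g.
//
//   unsigned N = count_if(BB, [](const Instruction &J) {
//     return !J.isLifetimeStartOrEnd();
//   });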

const Instruction *Instruction::getNextNonDebugInstruction() const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I))
      return I;
  return nullptr;
}

const Instruction *Instruction::getPrevNonDebugInstruction() const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I))
      return I;
  return nullptr;
}
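
// Illustrative sketch (an addition; `I` is hypothetical): these walkers make
// queries insensitive to interleaved debug intrinsics, e.g. "is I the last
// real instruction before the terminator?":
//
//   bool LastBeforeTerm =
//       I->getNextNonDebugInstruction() == I->getParent()->getTerminator();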

bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}
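
// Illustrative example (an addition): floating-point reassociation requires
// both of the fast-math flags checked above, e.g.
//
//   %x = fadd reassoc nsz float %a, %b   ; isAssociative() == true
//   %y = fadd reassoc float %a, %b       ; isAssociative() == false (no nsz)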

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}
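
// Illustrative sketch (an addition; `TI`, `OldDest`, and `NewBB` are
// hypothetical): edge-splitting transforms typically rewire a predecessor's
// terminator with the helper above:
//
//   TI->replaceSuccessorWith(OldDest, NewBB);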

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name. Fetch them backwards and build a new one.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
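
// Illustrative sketch (an addition; `BI` and `Builder` are hypothetical):
// when a transform inverts a conditional branch, its !prof branch weights
// must flip with it; BranchInst::swapSuccessors() is expected to call
// swapProfMetadata() for exactly this reason:
//
//   BI->setCondition(Builder.CreateNot(BI->getCondition()));
//   BI->swapSuccessors(); // also swaps the branch_weights operands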

void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to
  // the new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}
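
// Illustrative sketch (an addition; `NewI` and `OldI` are hypothetical): the
// whitelist form copies only the requested kinds, e.g. TBAA plus the debug
// location:
//
//   unsigned WL[] = {LLVMContext::MD_tbaa, LLVMContext::MD_dbg};
//   NewI->copyMetadata(*OldI, WL);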

Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
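
// Illustrative sketch (an addition; `I` and `InsertPt` are hypothetical):
// clone() returns a detached instruction with no parent and no name, so a
// caller normally inserts and renames it:
//
//   Instruction *NewI = I->clone();
//   NewI->insertBefore(InsertPt);
//   NewI->setName(I->getName() + ".clone");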