2003-10-13 11:32:08 +08:00
|
|
|
//===-- Instruction.cpp - Implement the Instruction class -----------------===//
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2003-10-21 03:43:21 +08:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2003-10-21 03:43:21 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2001-06-07 04:29:01 +08:00
|
|
|
//
|
2013-01-02 17:10:48 +08:00
|
|
|
// This file implements the Instruction class for the IR library.
|
2001-06-07 04:29:01 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Instruction.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "llvm/ADT/DenseSet.h"
|
2014-03-04 19:01:28 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
2017-03-21 00:40:44 +08:00
|
|
|
#include "llvm/IR/MDBuilder.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "llvm/IR/Module.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Operator.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
2003-11-21 01:45:12 +08:00
|
|
|
using namespace llvm;
|
2003-11-12 06:41:34 +08:00
|
|
|
|
2011-07-18 12:54:35 +08:00
|
|
|
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
  // When an insertion point is supplied, link this instruction into that
  // point's basic block, immediately before the given instruction.
  if (InsertBefore) {
    BasicBlock *DestBB = InsertBefore->getParent();
    assert(DestBB && "Instruction to insert before is not in a basic block!");
    DestBB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}
|
|
|
|
|
2011-07-18 12:54:35 +08:00
|
|
|
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
  // Unlike the insert-before constructor, the destination block is mandatory:
  // append this instruction at the end of it.
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}
|
|
|
|
|
2007-12-10 10:14:30 +08:00
|
|
|
Instruction::~Instruction() {
  // An instruction must be unlinked from its basic block before destruction;
  // destroying a still-linked instruction would corrupt the block's list.
  assert(!Parent && "Instruction still linked in the program!");
  // Drop this instruction's entry from the per-context metadata map, if any.
  if (hasMetadataHashEntry())
    clearMetadataHashEntries();
}
|
|
|
|
|
|
|
|
|
2002-09-07 05:33:15 +08:00
|
|
|
// Record the basic block that now owns this instruction. This only updates
// the back-pointer; the caller is responsible for the actual list linkage.
void Instruction::setParent(BasicBlock *P) { Parent = P; }
|
|
|
|
|
2015-03-04 06:01:13 +08:00
|
|
|
// Walk up through the containing basic block to reach the enclosing module.
const Module *Instruction::getModule() const {
  return getParent()->getModule();
}
|
|
|
|
|
2015-12-08 08:13:12 +08:00
|
|
|
// The function containing this instruction is the parent of its basic block.
const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}
|
2015-05-27 05:03:23 +08:00
|
|
|
|
2004-10-12 06:21:39 +08:00
|
|
|
void Instruction::removeFromParent() {
|
2015-10-09 07:49:46 +08:00
|
|
|
getParent()->getInstList().remove(getIterator());
|
2004-10-12 06:21:39 +08:00
|
|
|
}
|
|
|
|
|
2015-04-02 08:03:07 +08:00
|
|
|
// Unlink this instruction from its basic block AND delete it. Returns an
// iterator to the instruction that followed it, so callers can keep walking.
iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}
|
2002-07-15 07:09:40 +08:00
|
|
|
|
2016-01-06 08:18:29 +08:00
|
|
|
/// Insert an unlinked instruction into a basic block immediately before the
|
|
|
|
/// specified instruction.
|
2008-06-18 02:29:27 +08:00
|
|
|
void Instruction::insertBefore(Instruction *InsertPos) {
|
2015-10-09 07:49:46 +08:00
|
|
|
InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
|
2008-06-18 02:29:27 +08:00
|
|
|
}
|
|
|
|
|
2016-01-06 08:18:29 +08:00
|
|
|
/// Insert an unlinked instruction into a basic block immediately after the
|
|
|
|
/// specified instruction.
|
2009-01-13 15:43:51 +08:00
|
|
|
void Instruction::insertAfter(Instruction *InsertPos) {
|
2015-10-09 07:49:46 +08:00
|
|
|
InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
|
|
|
|
this);
|
2009-01-13 15:43:51 +08:00
|
|
|
}
|
|
|
|
|
2016-01-06 08:18:29 +08:00
|
|
|
/// Unlink this instruction from its current basic block and insert it into the
|
|
|
|
/// basic block that MovePos lives in, right before MovePos.
|
2005-08-08 13:21:50 +08:00
|
|
|
void Instruction::moveBefore(Instruction *MovePos) {
|
2016-08-17 09:54:41 +08:00
|
|
|
moveBefore(*MovePos->getParent(), MovePos->getIterator());
|
|
|
|
}
|
|
|
|
|
2017-08-29 22:07:48 +08:00
|
|
|
// Move this instruction so it immediately follows MovePos, i.e. insert it
// before the position one past MovePos in MovePos's block.
void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}
|
|
|
|
|
2016-08-17 09:54:41 +08:00
|
|
|
// Splice this instruction out of its current block and into BB directly
// before iterator I. I may be BB.end(), which appends to the block.
void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}
|
|
|
|
|
2016-04-22 14:37:45 +08:00
|
|
|
void Instruction::setHasNoUnsignedWrap(bool b) {
|
|
|
|
cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::setHasNoSignedWrap(bool b) {
|
|
|
|
cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::setIsExact(bool b) {
|
|
|
|
cast<PossiblyExactOperator>(this)->setIsExact(b);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Instruction::hasNoUnsignedWrap() const {
|
|
|
|
return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Instruction::hasNoSignedWrap() const {
|
|
|
|
return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
|
|
|
|
}
|
|
|
|
|
2017-02-24 06:50:52 +08:00
|
|
|
void Instruction::dropPoisonGeneratingFlags() {
|
|
|
|
switch (getOpcode()) {
|
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::Mul:
|
|
|
|
case Instruction::Shl:
|
|
|
|
cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
|
|
|
|
cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::AShr:
|
|
|
|
case Instruction::LShr:
|
|
|
|
cast<PossiblyExactOperator>(this)->setIsExact(false);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case Instruction::GetElementPtr:
|
|
|
|
cast<GetElementPtrInst>(this)->setIsInBounds(false);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-22 14:37:45 +08:00
|
|
|
bool Instruction::isExact() const {
|
|
|
|
return cast<PossiblyExactOperator>(this)->isExact();
|
|
|
|
}
|
|
|
|
|
2012-11-27 08:41:22 +08:00
|
|
|
void Instruction::setHasUnsafeAlgebra(bool B) {
|
|
|
|
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
|
|
|
|
cast<FPMathOperator>(this)->setHasUnsafeAlgebra(B);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::setHasNoNaNs(bool B) {
|
|
|
|
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
|
|
|
|
cast<FPMathOperator>(this)->setHasNoNaNs(B);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::setHasNoInfs(bool B) {
|
|
|
|
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
|
|
|
|
cast<FPMathOperator>(this)->setHasNoInfs(B);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::setHasNoSignedZeros(bool B) {
|
|
|
|
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
|
|
|
|
cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::setHasAllowReciprocal(bool B) {
|
|
|
|
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
|
|
|
|
cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::setFastMathFlags(FastMathFlags FMF) {
|
|
|
|
assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
|
|
|
|
cast<FPMathOperator>(this)->setFastMathFlags(FMF);
|
|
|
|
}
|
|
|
|
|
2014-09-03 04:03:00 +08:00
|
|
|
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
|
|
|
|
assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
|
|
|
|
cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
|
|
|
|
}
|
|
|
|
|
2012-11-27 08:41:22 +08:00
|
|
|
bool Instruction::hasUnsafeAlgebra() const {
|
2014-06-12 02:26:29 +08:00
|
|
|
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
|
2012-11-27 08:41:22 +08:00
|
|
|
return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Instruction::hasNoNaNs() const {
|
2014-06-12 02:26:29 +08:00
|
|
|
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
|
2012-11-27 08:41:22 +08:00
|
|
|
return cast<FPMathOperator>(this)->hasNoNaNs();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Instruction::hasNoInfs() const {
|
2014-06-12 02:26:29 +08:00
|
|
|
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
|
2012-11-27 08:41:22 +08:00
|
|
|
return cast<FPMathOperator>(this)->hasNoInfs();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Instruction::hasNoSignedZeros() const {
|
2014-06-12 02:26:29 +08:00
|
|
|
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
|
2012-11-27 08:41:22 +08:00
|
|
|
return cast<FPMathOperator>(this)->hasNoSignedZeros();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Instruction::hasAllowReciprocal() const {
|
2014-06-12 02:26:29 +08:00
|
|
|
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
|
2012-11-27 08:41:22 +08:00
|
|
|
return cast<FPMathOperator>(this)->hasAllowReciprocal();
|
|
|
|
}
|
|
|
|
|
2017-03-29 04:11:52 +08:00
|
|
|
bool Instruction::hasAllowContract() const {
|
|
|
|
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
|
|
|
|
return cast<FPMathOperator>(this)->hasAllowContract();
|
|
|
|
}
|
|
|
|
|
2012-11-27 08:41:22 +08:00
|
|
|
FastMathFlags Instruction::getFastMathFlags() const {
|
2014-06-12 02:26:29 +08:00
|
|
|
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
|
2012-11-27 08:41:22 +08:00
|
|
|
return cast<FPMathOperator>(this)->getFastMathFlags();
|
|
|
|
}
|
2005-08-08 13:21:50 +08:00
|
|
|
|
2012-11-30 05:25:12 +08:00
|
|
|
void Instruction::copyFastMathFlags(const Instruction *I) {
|
2014-09-03 04:03:00 +08:00
|
|
|
copyFastMathFlags(I->getFastMathFlags());
|
2012-11-30 05:25:12 +08:00
|
|
|
}
|
|
|
|
|
[LoopVectorize] Don't preserve nsw/nuw flags on shrunken ops.
If we're shrinking a binary operation, it may be the case that the new
operations wraps where the old didn't. If this happens, the behavior
should be well-defined. So, we can't always carry wrapping flags with us
when we shrink operations.
If we do, we get incorrect optimizations in cases like:
void foo(const unsigned char *from, unsigned char *to, int n) {
for (int i = 0; i < n; i++)
to[i] = from[i] - 128;
}
which gets optimized to:
void foo(const unsigned char *from, unsigned char *to, int n) {
for (int i = 0; i < n; i++)
to[i] = from[i] | 128;
}
Because:
- InstCombine turned `sub i32 %from.i, 128` into
`add nuw nsw i32 %from.i, 128`.
- LoopVectorize vectorized the add to be `add nuw nsw <16 x i8>` with a
vector full of `i8 128`s
- InstCombine took advantage of the fact that the newly-shrunken add
"couldn't wrap", and changed the `add` to an `or`.
InstCombine seems happy to figure out whether we can add nuw/nsw on its
own, so I just decided to drop the flags. There are already a number of
places in LoopVectorize where we rely on InstCombine to clean up.
llvm-svn: 305053
2017-06-09 11:56:15 +08:00
|
|
|
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
|
2016-04-22 14:37:45 +08:00
|
|
|
// Copy the wrapping flags.
|
[LoopVectorize] Don't preserve nsw/nuw flags on shrunken ops.
If we're shrinking a binary operation, it may be the case that the new
operations wraps where the old didn't. If this happens, the behavior
should be well-defined. So, we can't always carry wrapping flags with us
when we shrink operations.
If we do, we get incorrect optimizations in cases like:
void foo(const unsigned char *from, unsigned char *to, int n) {
for (int i = 0; i < n; i++)
to[i] = from[i] - 128;
}
which gets optimized to:
void foo(const unsigned char *from, unsigned char *to, int n) {
for (int i = 0; i < n; i++)
to[i] = from[i] | 128;
}
Because:
- InstCombine turned `sub i32 %from.i, 128` into
`add nuw nsw i32 %from.i, 128`.
- LoopVectorize vectorized the add to be `add nuw nsw <16 x i8>` with a
vector full of `i8 128`s
- InstCombine took advantage of the fact that the newly-shrunken add
"couldn't wrap", and changed the `add` to an `or`.
InstCombine seems happy to figure out whether we can add nuw/nsw on its
own, so I just decided to drop the flags. There are already a number of
places in LoopVectorize where we rely on InstCombine to clean up.
llvm-svn: 305053
2017-06-09 11:56:15 +08:00
|
|
|
if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
|
|
|
|
if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
|
2016-04-22 14:37:51 +08:00
|
|
|
setHasNoSignedWrap(OB->hasNoSignedWrap());
|
|
|
|
setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
|
|
|
|
}
|
2016-04-22 14:37:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Copy the exact flag.
|
|
|
|
if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
|
2016-04-22 14:37:51 +08:00
|
|
|
if (isa<PossiblyExactOperator>(this))
|
|
|
|
setIsExact(PE->isExact());
|
2016-04-22 14:37:45 +08:00
|
|
|
|
|
|
|
// Copy the fast-math flags.
|
|
|
|
if (auto *FP = dyn_cast<FPMathOperator>(V))
|
2016-04-22 14:37:51 +08:00
|
|
|
if (isa<FPMathOperator>(this))
|
|
|
|
copyFastMathFlags(FP->getFastMathFlags());
|
2016-07-15 13:02:31 +08:00
|
|
|
|
|
|
|
if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
|
|
|
|
if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
|
|
|
|
DestGEP->setIsInBounds(SrcGEP->isInBounds() | DestGEP->isInBounds());
|
2016-04-22 14:37:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::andIRFlags(const Value *V) {
|
|
|
|
if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
|
2016-04-22 14:37:51 +08:00
|
|
|
if (isa<OverflowingBinaryOperator>(this)) {
|
|
|
|
setHasNoSignedWrap(hasNoSignedWrap() & OB->hasNoSignedWrap());
|
|
|
|
setHasNoUnsignedWrap(hasNoUnsignedWrap() & OB->hasNoUnsignedWrap());
|
|
|
|
}
|
2016-04-22 14:37:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
|
2016-04-22 14:37:51 +08:00
|
|
|
if (isa<PossiblyExactOperator>(this))
|
|
|
|
setIsExact(isExact() & PE->isExact());
|
2016-04-22 14:37:45 +08:00
|
|
|
|
|
|
|
if (auto *FP = dyn_cast<FPMathOperator>(V)) {
|
2016-04-22 14:37:51 +08:00
|
|
|
if (isa<FPMathOperator>(this)) {
|
|
|
|
FastMathFlags FM = getFastMathFlags();
|
|
|
|
FM &= FP->getFastMathFlags();
|
|
|
|
copyFastMathFlags(FM);
|
|
|
|
}
|
2016-04-22 14:37:45 +08:00
|
|
|
}
|
2016-07-15 13:02:31 +08:00
|
|
|
|
|
|
|
if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
|
|
|
|
if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
|
|
|
|
DestGEP->setIsInBounds(SrcGEP->isInBounds() & DestGEP->isInBounds());
|
2016-04-22 14:37:45 +08:00
|
|
|
}
|
2012-11-30 05:25:12 +08:00
|
|
|
|
2002-07-15 07:09:40 +08:00
|
|
|
const char *Instruction::getOpcodeName(unsigned OpCode) {
|
|
|
|
switch (OpCode) {
|
|
|
|
// Terminators
|
2002-08-15 02:18:02 +08:00
|
|
|
case Ret: return "ret";
|
|
|
|
case Br: return "br";
|
2002-07-15 07:09:40 +08:00
|
|
|
case Switch: return "switch";
|
2009-10-28 08:19:10 +08:00
|
|
|
case IndirectBr: return "indirectbr";
|
2002-07-15 07:09:40 +08:00
|
|
|
case Invoke: return "invoke";
|
2011-07-31 14:30:59 +08:00
|
|
|
case Resume: return "resume";
|
2004-10-17 02:08:06 +08:00
|
|
|
case Unreachable: return "unreachable";
|
2015-08-01 01:58:14 +08:00
|
|
|
case CleanupRet: return "cleanupret";
|
|
|
|
case CatchRet: return "catchret";
|
|
|
|
case CatchPad: return "catchpad";
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
case CatchSwitch: return "catchswitch";
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2002-07-15 07:09:40 +08:00
|
|
|
// Standard binary operators...
|
|
|
|
case Add: return "add";
|
2009-06-05 06:49:04 +08:00
|
|
|
case FAdd: return "fadd";
|
2002-07-15 07:09:40 +08:00
|
|
|
case Sub: return "sub";
|
2009-06-05 06:49:04 +08:00
|
|
|
case FSub: return "fsub";
|
2002-07-15 07:09:40 +08:00
|
|
|
case Mul: return "mul";
|
2009-06-05 06:49:04 +08:00
|
|
|
case FMul: return "fmul";
|
2006-10-26 14:15:43 +08:00
|
|
|
case UDiv: return "udiv";
|
|
|
|
case SDiv: return "sdiv";
|
|
|
|
case FDiv: return "fdiv";
|
2006-11-02 09:53:59 +08:00
|
|
|
case URem: return "urem";
|
|
|
|
case SRem: return "srem";
|
|
|
|
case FRem: return "frem";
|
2002-07-15 07:09:40 +08:00
|
|
|
|
|
|
|
// Logical operators...
|
|
|
|
case And: return "and";
|
|
|
|
case Or : return "or";
|
|
|
|
case Xor: return "xor";
|
|
|
|
|
|
|
|
// Memory instructions...
|
|
|
|
case Alloca: return "alloca";
|
|
|
|
case Load: return "load";
|
|
|
|
case Store: return "store";
|
2011-07-29 05:48:00 +08:00
|
|
|
case AtomicCmpXchg: return "cmpxchg";
|
|
|
|
case AtomicRMW: return "atomicrmw";
|
2011-07-26 07:16:38 +08:00
|
|
|
case Fence: return "fence";
|
2002-07-15 07:09:40 +08:00
|
|
|
case GetElementPtr: return "getelementptr";
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2006-11-27 09:05:10 +08:00
|
|
|
// Convert instructions...
|
2013-11-15 09:34:59 +08:00
|
|
|
case Trunc: return "trunc";
|
|
|
|
case ZExt: return "zext";
|
|
|
|
case SExt: return "sext";
|
|
|
|
case FPTrunc: return "fptrunc";
|
|
|
|
case FPExt: return "fpext";
|
|
|
|
case FPToUI: return "fptoui";
|
|
|
|
case FPToSI: return "fptosi";
|
|
|
|
case UIToFP: return "uitofp";
|
|
|
|
case SIToFP: return "sitofp";
|
|
|
|
case IntToPtr: return "inttoptr";
|
|
|
|
case PtrToInt: return "ptrtoint";
|
|
|
|
case BitCast: return "bitcast";
|
|
|
|
case AddrSpaceCast: return "addrspacecast";
|
2006-11-27 09:05:10 +08:00
|
|
|
|
2002-07-15 07:09:40 +08:00
|
|
|
// Other instructions...
|
2006-12-03 14:27:29 +08:00
|
|
|
case ICmp: return "icmp";
|
|
|
|
case FCmp: return "fcmp";
|
2006-11-27 09:05:10 +08:00
|
|
|
case PHI: return "phi";
|
|
|
|
case Select: return "select";
|
|
|
|
case Call: return "call";
|
|
|
|
case Shl: return "shl";
|
|
|
|
case LShr: return "lshr";
|
|
|
|
case AShr: return "ashr";
|
|
|
|
case VAArg: return "va_arg";
|
2006-01-11 03:05:34 +08:00
|
|
|
case ExtractElement: return "extractelement";
|
2006-11-27 09:05:10 +08:00
|
|
|
case InsertElement: return "insertelement";
|
|
|
|
case ShuffleVector: return "shufflevector";
|
2008-05-30 18:31:54 +08:00
|
|
|
case ExtractValue: return "extractvalue";
|
|
|
|
case InsertValue: return "insertvalue";
|
2011-08-13 04:24:12 +08:00
|
|
|
case LandingPad: return "landingpad";
|
2015-08-23 08:26:33 +08:00
|
|
|
case CleanupPad: return "cleanuppad";
|
2003-05-08 10:44:12 +08:00
|
|
|
|
2002-07-15 07:09:40 +08:00
|
|
|
default: return "<Invalid operator> ";
|
|
|
|
}
|
|
|
|
}
|
2002-10-31 12:14:01 +08:00
|
|
|
|
2016-10-06 02:51:12 +08:00
|
|
|
/// Return true if both instructions have the same special state. This must be
|
2016-04-12 06:30:37 +08:00
|
|
|
/// kept in sync with FunctionComparator::cmpOperations in
|
|
|
|
/// lib/Transforms/IPO/MergeFunctions.cpp.
|
2014-05-28 05:35:46 +08:00
|
|
|
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
|
|
|
|
bool IgnoreAlignment = false) {
|
|
|
|
assert(I1->getOpcode() == I2->getOpcode() &&
|
|
|
|
"Can not compare special state of different instructions");
|
|
|
|
|
2016-04-13 02:06:55 +08:00
|
|
|
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
|
|
|
|
return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
|
|
|
|
(AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() ||
|
|
|
|
IgnoreAlignment);
|
2014-05-28 05:35:46 +08:00
|
|
|
if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
|
|
|
|
return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
|
|
|
|
(LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
|
|
|
|
IgnoreAlignment) &&
|
|
|
|
LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
|
2017-07-12 06:23:00 +08:00
|
|
|
LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
|
2014-05-28 05:35:46 +08:00
|
|
|
if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
|
|
|
|
return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
|
|
|
|
(SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
|
|
|
|
IgnoreAlignment) &&
|
|
|
|
SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
|
2017-07-12 06:23:00 +08:00
|
|
|
SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
|
2014-05-28 05:35:46 +08:00
|
|
|
if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
|
|
|
|
return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
|
|
|
|
if (const CallInst *CI = dyn_cast<CallInst>(I1))
|
|
|
|
return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
|
|
|
|
CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
|
2015-12-15 03:11:35 +08:00
|
|
|
CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
|
|
|
|
CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
|
2014-05-28 05:35:46 +08:00
|
|
|
if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
|
|
|
|
return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
|
2015-12-15 03:11:35 +08:00
|
|
|
CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
|
|
|
|
CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
|
2014-05-28 05:35:46 +08:00
|
|
|
if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
|
|
|
|
return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
|
|
|
|
if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
|
|
|
|
return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
|
|
|
|
if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
|
|
|
|
return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
|
2017-07-12 06:23:00 +08:00
|
|
|
FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
|
2014-05-28 05:35:46 +08:00
|
|
|
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
|
|
|
|
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
|
IR: add "cmpxchg weak" variant to support permitted failure.
This commit adds a weak variant of the cmpxchg operation, as described
in C++11. A cmpxchg instruction with this modifier is permitted to
fail to store, even if the comparison indicated it should.
As a result, cmpxchg instructions must return a flag indicating
success in addition to their original iN value loaded. Thus, for
uniformity *all* cmpxchg instructions now return "{ iN, i1 }". The
second flag is 1 when the store succeeded.
At the DAG level, a new ATOMIC_CMP_SWAP_WITH_SUCCESS node has been
added as the natural representation for the new cmpxchg instructions.
It is a strong cmpxchg.
By default this gets Expanded to the existing ATOMIC_CMP_SWAP during
Legalization, so existing backends should see no change in behaviour.
If they wish to deal with the enhanced node instead, they can call
setOperationAction on it. Beware: as a node with 2 results, it cannot
be selected from TableGen.
Currently, no use is made of the extra information provided in this
patch. Test updates are almost entirely adapting the input IR to the
new scheme.
Summary for out of tree users:
------------------------------
+ Legacy Bitcode files are upgraded during read.
+ Legacy assembly IR files will be invalid.
+ Front-ends must adapt to different type for "cmpxchg".
+ Backends should be unaffected by default.
llvm-svn: 210903
2014-06-13 22:24:07 +08:00
|
|
|
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
|
2014-05-28 05:35:46 +08:00
|
|
|
CXI->getSuccessOrdering() ==
|
|
|
|
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
|
|
|
|
CXI->getFailureOrdering() ==
|
|
|
|
cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
|
2017-07-12 06:23:00 +08:00
|
|
|
CXI->getSyncScopeID() ==
|
|
|
|
cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
|
2014-05-28 05:35:46 +08:00
|
|
|
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
|
|
|
|
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
|
|
|
|
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
|
|
|
|
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
|
2017-07-12 06:23:00 +08:00
|
|
|
RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
|
2014-05-28 05:35:46 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-11-27 16:39:18 +08:00
|
|
|
bool Instruction::isIdenticalTo(const Instruction *I) const {
|
2009-08-26 06:24:20 +08:00
|
|
|
return isIdenticalToWhenDefined(I) &&
|
2009-08-26 06:11:20 +08:00
|
|
|
SubclassOptionalData == I->SubclassOptionalData;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
|
2004-11-30 10:51:53 +08:00
|
|
|
if (getOpcode() != I->getOpcode() ||
|
|
|
|
getNumOperands() != I->getNumOperands() ||
|
|
|
|
getType() != I->getType())
|
|
|
|
return false;
|
|
|
|
|
2014-06-02 09:35:34 +08:00
|
|
|
// If both instructions have no operands, they are identical.
|
|
|
|
if (getNumOperands() == 0 && I->getNumOperands() == 0)
|
|
|
|
return haveSameSpecialState(this, I);
|
|
|
|
|
2004-11-30 10:51:53 +08:00
|
|
|
// We have two instructions of identical opcode and #operands. Check to see
|
|
|
|
// if all operands are the same.
|
2014-03-10 23:03:06 +08:00
|
|
|
if (!std::equal(op_begin(), op_end(), I->op_begin()))
|
|
|
|
return false;
|
2004-11-30 10:51:53 +08:00
|
|
|
|
2012-05-10 23:59:41 +08:00
|
|
|
if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
|
|
|
|
const PHINode *otherPHI = cast<PHINode>(I);
|
2014-03-10 23:03:06 +08:00
|
|
|
return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
|
|
|
|
otherPHI->block_begin());
|
2012-05-10 23:59:41 +08:00
|
|
|
}
|
2014-05-28 05:35:46 +08:00
|
|
|
|
|
|
|
return haveSameSpecialState(this, I);
|
2006-12-23 14:05:41 +08:00
|
|
|
}
|
|
|
|
|
2016-04-12 06:30:37 +08:00
|
|
|
// Keep this in sync with FunctionComparator::cmpOperations in
|
2009-06-13 03:03:05 +08:00
|
|
|
// lib/Transforms/IPO/MergeFunctions.cpp.
|
2012-06-28 13:42:26 +08:00
|
|
|
bool Instruction::isSameOperationAs(const Instruction *I,
|
|
|
|
unsigned flags) const {
|
|
|
|
bool IgnoreAlignment = flags & CompareIgnoringAlignment;
|
|
|
|
bool UseScalarTypes = flags & CompareUsingScalarTypes;
|
|
|
|
|
2009-06-13 03:03:05 +08:00
|
|
|
if (getOpcode() != I->getOpcode() ||
|
|
|
|
getNumOperands() != I->getNumOperands() ||
|
2012-06-28 13:42:26 +08:00
|
|
|
(UseScalarTypes ?
|
|
|
|
getType()->getScalarType() != I->getType()->getScalarType() :
|
|
|
|
getType() != I->getType()))
|
2006-12-23 14:05:41 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// We have two instructions of identical opcode and #operands. Check to see
|
|
|
|
// if all operands are the same type
|
|
|
|
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
|
2012-06-28 13:42:26 +08:00
|
|
|
if (UseScalarTypes ?
|
|
|
|
getOperand(i)->getType()->getScalarType() !=
|
|
|
|
I->getOperand(i)->getType()->getScalarType() :
|
|
|
|
getOperand(i)->getType() != I->getOperand(i)->getType())
|
2006-12-23 14:05:41 +08:00
|
|
|
return false;
|
|
|
|
|
2014-05-28 05:35:46 +08:00
|
|
|
return haveSameSpecialState(this, I, IgnoreAlignment);
|
2004-11-30 10:51:53 +08:00
|
|
|
}
|
|
|
|
|
2008-04-21 06:11:30 +08:00
|
|
|
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
|
2014-03-09 11:16:01 +08:00
|
|
|
for (const Use &U : uses()) {
|
2008-04-21 06:11:30 +08:00
|
|
|
// PHI nodes uses values in the corresponding predecessor block. For other
|
|
|
|
// instructions, just check to see whether the parent of the use matches up.
|
2014-03-09 11:16:01 +08:00
|
|
|
const Instruction *I = cast<Instruction>(U.getUser());
|
|
|
|
const PHINode *PN = dyn_cast<PHINode>(I);
|
2014-04-09 14:08:46 +08:00
|
|
|
if (!PN) {
|
2014-03-09 11:16:01 +08:00
|
|
|
if (I->getParent() != BB)
|
2008-04-21 06:11:30 +08:00
|
|
|
return true;
|
|
|
|
continue;
|
|
|
|
}
|
2009-09-20 10:20:51 +08:00
|
|
|
|
2014-03-09 11:16:01 +08:00
|
|
|
if (PN->getIncomingBlock(U) != BB)
|
2008-04-21 06:11:30 +08:00
|
|
|
return true;
|
|
|
|
}
|
2009-09-20 10:20:51 +08:00
|
|
|
return false;
|
2008-04-21 06:11:30 +08:00
|
|
|
}
|
|
|
|
|
2008-05-09 01:16:51 +08:00
|
|
|
bool Instruction::mayReadFromMemory() const {
|
|
|
|
switch (getOpcode()) {
|
|
|
|
default: return false;
|
|
|
|
case Instruction::VAArg:
|
2008-05-09 05:58:49 +08:00
|
|
|
case Instruction::Load:
|
2011-07-27 09:08:30 +08:00
|
|
|
case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
|
2011-07-29 11:05:32 +08:00
|
|
|
case Instruction::AtomicCmpXchg:
|
|
|
|
case Instruction::AtomicRMW:
|
2015-09-11 02:50:09 +08:00
|
|
|
case Instruction::CatchPad:
|
2015-08-01 01:58:14 +08:00
|
|
|
case Instruction::CatchRet:
|
2008-05-09 01:16:51 +08:00
|
|
|
return true;
|
|
|
|
case Instruction::Call:
|
|
|
|
return !cast<CallInst>(this)->doesNotAccessMemory();
|
|
|
|
case Instruction::Invoke:
|
|
|
|
return !cast<InvokeInst>(this)->doesNotAccessMemory();
|
2008-05-09 05:58:49 +08:00
|
|
|
case Instruction::Store:
|
2011-08-16 05:00:18 +08:00
|
|
|
return !cast<StoreInst>(this)->isUnordered();
|
2008-05-09 01:16:51 +08:00
|
|
|
}
|
|
|
|
}
|
2008-04-21 06:11:30 +08:00
|
|
|
|
2007-02-16 07:15:00 +08:00
|
|
|
bool Instruction::mayWriteToMemory() const {
|
|
|
|
switch (getOpcode()) {
|
|
|
|
default: return false;
|
2011-07-27 09:08:30 +08:00
|
|
|
case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
|
2007-12-04 04:06:50 +08:00
|
|
|
case Instruction::Store:
|
2007-02-16 07:15:00 +08:00
|
|
|
case Instruction::VAArg:
|
2011-07-29 11:05:32 +08:00
|
|
|
case Instruction::AtomicCmpXchg:
|
|
|
|
case Instruction::AtomicRMW:
|
2015-09-11 02:50:09 +08:00
|
|
|
case Instruction::CatchPad:
|
2015-08-01 01:58:14 +08:00
|
|
|
case Instruction::CatchRet:
|
2007-02-16 07:15:00 +08:00
|
|
|
return true;
|
|
|
|
case Instruction::Call:
|
2007-12-04 04:06:50 +08:00
|
|
|
return !cast<CallInst>(this)->onlyReadsMemory();
|
2008-05-09 01:16:51 +08:00
|
|
|
case Instruction::Invoke:
|
|
|
|
return !cast<InvokeInst>(this)->onlyReadsMemory();
|
2007-02-16 07:15:00 +08:00
|
|
|
case Instruction::Load:
|
2011-08-16 05:00:18 +08:00
|
|
|
return !cast<LoadInst>(this)->isUnordered();
|
2007-02-16 07:15:00 +08:00
|
|
|
}
|
|
|
|
}
|
2002-10-31 12:14:01 +08:00
|
|
|
|
2014-09-04 05:29:59 +08:00
|
|
|
bool Instruction::isAtomic() const {
|
|
|
|
switch (getOpcode()) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case Instruction::AtomicCmpXchg:
|
|
|
|
case Instruction::AtomicRMW:
|
|
|
|
case Instruction::Fence:
|
|
|
|
return true;
|
|
|
|
case Instruction::Load:
|
2016-04-07 05:19:33 +08:00
|
|
|
return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
|
2014-09-04 05:29:59 +08:00
|
|
|
case Instruction::Store:
|
2016-04-07 05:19:33 +08:00
|
|
|
return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
|
2014-09-04 05:29:59 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-09 23:27:17 +08:00
|
|
|
bool Instruction::hasAtomicLoad() const {
|
|
|
|
assert(isAtomic());
|
|
|
|
switch (getOpcode()) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case Instruction::AtomicCmpXchg:
|
|
|
|
case Instruction::AtomicRMW:
|
|
|
|
case Instruction::Load:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Instruction::hasAtomicStore() const {
|
|
|
|
assert(isAtomic());
|
|
|
|
switch (getOpcode()) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case Instruction::AtomicCmpXchg:
|
|
|
|
case Instruction::AtomicRMW:
|
|
|
|
case Instruction::Store:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-05-06 14:49:50 +08:00
|
|
|
bool Instruction::mayThrow() const {
|
|
|
|
if (const CallInst *CI = dyn_cast<CallInst>(this))
|
|
|
|
return !CI->doesNotThrow();
|
2015-08-01 01:58:14 +08:00
|
|
|
if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
|
|
|
|
return CRI->unwindsToCaller();
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 13:38:55 +08:00
|
|
|
if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
|
|
|
|
return CatchSwitch->unwindsToCaller();
|
2011-08-17 05:15:50 +08:00
|
|
|
return isa<ResumeInst>(this);
|
2009-05-06 14:49:50 +08:00
|
|
|
}
|
|
|
|
|
2012-11-29 09:47:31 +08:00
|
|
|
bool Instruction::isAssociative() const {
|
|
|
|
unsigned Opcode = getOpcode();
|
|
|
|
if (isAssociative(Opcode))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
switch (Opcode) {
|
|
|
|
case FMul:
|
|
|
|
case FAdd:
|
|
|
|
return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-25 04:22:23 +08:00
|
|
|
// Fallback for the per-subclass cloneImpl() dispatched from clone(); every
// concrete Instruction subclass must override this, so reaching the base
// implementation is a programming error.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
|
|
|
|
|
2016-08-23 23:39:03 +08:00
|
|
|
void Instruction::swapProfMetadata() {
|
|
|
|
MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
|
|
|
|
if (!ProfileData || ProfileData->getNumOperands() != 3 ||
|
|
|
|
!isa<MDString>(ProfileData->getOperand(0)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
|
|
|
|
if (MDName->getString() != "branch_weights")
|
|
|
|
return;
|
|
|
|
|
|
|
|
// The first operand is the name. Fetch them backwards and build a new one.
|
|
|
|
Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
|
|
|
|
ProfileData->getOperand(1)};
|
|
|
|
setMetadata(LLVMContext::MD_prof,
|
|
|
|
MDNode::get(ProfileData->getContext(), Ops));
|
|
|
|
}
|
|
|
|
|
|
|
|
void Instruction::copyMetadata(const Instruction &SrcInst,
|
|
|
|
ArrayRef<unsigned> WL) {
|
|
|
|
if (!SrcInst.hasMetadata())
|
|
|
|
return;
|
|
|
|
|
|
|
|
DenseSet<unsigned> WLS;
|
|
|
|
for (unsigned M : WL)
|
|
|
|
WLS.insert(M);
|
|
|
|
|
|
|
|
// Otherwise, enumerate and copy over metadata from the old instruction to the
|
|
|
|
// new one.
|
|
|
|
SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
|
|
|
|
SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
|
|
|
|
for (const auto &MD : TheMDs) {
|
|
|
|
if (WL.empty() || WLS.count(MD.first))
|
|
|
|
setMetadata(MD.first, MD.second);
|
|
|
|
}
|
|
|
|
if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
|
|
|
|
setDebugLoc(SrcInst.getDebugLoc());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-10-28 06:16:29 +08:00
|
|
|
// Create a copy of this instruction that is identical in all ways except that
// it has no parent and no name. Dispatches to the subclass cloneImpl() via an
// X-macro over every opcode in Instruction.def, then copies over the optional
// flags and all metadata (including the debug location).
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
|
2017-03-21 00:40:44 +08:00
|
|
|
|
|
|
|
/// Scale this instruction's "branch_weights" or "VP" profile metadata by the
/// ratio S/T. Other (or missing) profile metadata is left untouched.
void Instruction::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);

  // Scale a single count operand by S/T and append it to Vals. 128-bit
  // arithmetic keeps the intermediate product from overflowing; using
  // APInt::udiv may be expensive, but most cases should fit 64 bits.
  auto appendScaled = [&](const MDOperand &Op) {
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(Op)
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(
        ConstantInt::get(Type::getInt64Ty(getContext()),
                         Val.udiv(APT).getLimitedValue())));
  };

  if (ProfDataName->getString().equals("branch_weights")) {
    // Operands 1..N are plain weights; scale each one.
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i++)
      appendScaled(ProfileData->getOperand(i));
  } else {
    // "VP": operands come in (key, count) pairs. The key identifies the value
    // profile and does not change; only the count is scaled.
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      Vals.push_back(ProfileData->getOperand(i));
      appendScaled(ProfileData->getOperand(i + 1));
    }
  }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
|
2017-03-24 07:26:00 +08:00
|
|
|
|
|
|
|
/// Set a single "branch_weights" profile count of \p W on this call or invoke.
void Instruction::setProfWeight(uint64_t W) {
  assert((isa<CallInst>(this) || isa<InvokeInst>(this)) &&
         "Can only set weights for call and invoke instructions");
  // Branch weights are stored as 32-bit values; W is implicitly truncated if
  // it does not fit. FIXME: consider saturating to UINT32_MAX instead.
  SmallVector<uint32_t, 1> Weights;
  Weights.push_back(W);
  MDBuilder MDB(getContext());
  setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
}
|