[Bitcode] Support expanding constant expressions into instructions

This implements an autoupgrade from constant expressions to
instructions, which is needed for
https://discourse.llvm.org/t/rfc-remove-most-constant-expressions/63179.

The basic approach is that constant expressions (CST_CODE_CE_*
records) now initially only create a BitcodeConstant value that
holds opcode, flags and operands IDs. Then, when the value actually
gets used, it can be converted either into a constant expression
(if that expression type is still supported) or into a sequence of
instructions. As currently all expressions are still supported,
-expand-constant-exprs is added for testing purposes, to force
expansion.

PHI nodes require special handling, because the constant expression
needs to be evaluated on the incoming edge. We do this by putting
it into a temporary block and then wiring it up appropriately
afterwards (for non-critical edges, we could also move the
instructions into the predecessor).

This also removes the need for the forward referenced constants
machinery, as the BitcodeConstants only hold value IDs. At the
point where the value is actually materialized, no forward
references are needed anymore.

Differential Revision: https://reviews.llvm.org/D127729
This commit is contained in:
Nikita Popov 2022-06-08 17:29:42 +02:00
parent 180cc74de9
commit 941c8e0ea5
6 changed files with 903 additions and 578 deletions

File diff suppressed because it is too large Load Diff

View File

@ -1227,7 +1227,8 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
}
MetadataList.assignValue(
LocalAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty, TyID)),
LocalAsMetadata::get(ValueList.getValueFwdRef(
Record[1], Ty, TyID, /*ConstExprInsertBB*/ nullptr)),
NextMetadataNo);
NextMetadataNo++;
break;
@ -1247,8 +1248,8 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
if (Ty->isMetadataTy())
Elts.push_back(getMD(Record[i + 1]));
else if (!Ty->isVoidTy()) {
auto *MD = ValueAsMetadata::get(
ValueList.getValueFwdRef(Record[i + 1], Ty, TyID));
auto *MD = ValueAsMetadata::get(ValueList.getValueFwdRef(
Record[i + 1], Ty, TyID, /*ConstExprInsertBB*/ nullptr));
assert(isa<ConstantAsMetadata>(MD) &&
"Expected non-function-local metadata");
Elts.push_back(MD);
@ -1269,7 +1270,8 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
return error("Invalid record");
MetadataList.assignValue(
ValueAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty, TyID)),
ValueAsMetadata::get(ValueList.getValueFwdRef(
Record[1], Ty, TyID, /*ConstExprInsertBB*/ nullptr)),
NextMetadataNo);
NextMetadataNo++;
break;

View File

@ -23,44 +23,6 @@
using namespace llvm;
namespace llvm {
namespace {
/// A class for maintaining the slot number definition
/// as a placeholder for the actual definition for forward constants defs.
///
/// An instance stands in for a constant that has been referenced before it
/// was parsed; it is later RAUW'd with the real constant (see
/// resolveConstantForwardRefs). The Instruction::UserOp1 opcode is used as a
/// sentinel so classof() can distinguish placeholders from real expressions.
class ConstantPlaceHolder : public ConstantExpr {
public:
explicit ConstantPlaceHolder(Type *Ty, LLVMContext &Context)
: ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) {
// Dummy operand — only the placeholder's identity and type matter.
Op<0>() = UndefValue::get(Type::getInt32Ty(Context));
}
ConstantPlaceHolder &operator=(const ConstantPlaceHolder &) = delete;
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
/// Methods to support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Value *V) {
return isa<ConstantExpr>(V) &&
cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
}
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
};
} // end anonymous namespace
// FIXME: can we inherit this from ConstantExpr?
template <>
struct OperandTraits<ConstantPlaceHolder>
: public FixedNumOperandTraits<ConstantPlaceHolder, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantPlaceHolder, Value)
} // end namespace llvm
Error BitcodeReaderValueList::assignValue(unsigned Idx, Value *V,
unsigned TypeID) {
if (Idx == size()) {
@ -78,48 +40,21 @@ Error BitcodeReaderValueList::assignValue(unsigned Idx, Value *V,
return Error::success();
}
// Handle constants and non-constants (e.g. instrs) differently for
// efficiency.
if (Constant *PHC = dyn_cast<Constant>(&*Old.first)) {
ResolveConstants.push_back(std::make_pair(PHC, Idx));
Old.first = V;
Old.second = TypeID;
} else {
// If there was a forward reference to this value, replace it.
Value *PrevVal = Old.first;
if (PrevVal->getType() != V->getType())
return createStringError(
std::errc::illegal_byte_sequence,
"Assigned value does not match type of forward declaration");
Old.first->replaceAllUsesWith(V);
PrevVal->deleteValue();
}
assert(!isa<Constant>(&*Old.first) && "Shouldn't update constant");
// If there was a forward reference to this value, replace it.
Value *PrevVal = Old.first;
if (PrevVal->getType() != V->getType())
return createStringError(
std::errc::illegal_byte_sequence,
"Assigned value does not match type of forward declaration");
Old.first->replaceAllUsesWith(V);
PrevVal->deleteValue();
return Error::success();
}
/// Return the constant stored at slot Idx, or a ConstantPlaceHolder of type
/// Ty that will be RAUW'd once the real constant is parsed. Returns nullptr
/// if Idx exceeds the declared reference upper bound.
Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx, Type *Ty,
unsigned TyID) {
// Bail out for a clearly invalid value.
if (Idx >= RefsUpperBound)
return nullptr;
if (Idx >= size())
resize(Idx + 1);
if (Value *V = ValuePtrs[Idx].first) {
// Slot already populated: the type must match what the caller expects.
if (Ty != V->getType())
report_fatal_error("Type mismatch in constant table!");
return cast<Constant>(V);
}
// Create and return a placeholder, which will later be RAUW'd.
Constant *C = new ConstantPlaceHolder(Ty, Context);
ValuePtrs[Idx] = {C, TyID};
return C;
}
Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty,
unsigned TyID) {
unsigned TyID,
BasicBlock *ConstExprInsertBB) {
// Bail out for a clearly invalid value.
if (Idx >= RefsUpperBound)
return nullptr;
@ -131,7 +66,14 @@ Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty,
// If the types don't match, it's invalid.
if (Ty && Ty != V->getType())
return nullptr;
return V;
Expected<Value *> MaybeV = MaterializeValueFn(Idx, ConstExprInsertBB);
if (!MaybeV) {
// TODO: We might want to propagate the precise error message here.
consumeError(MaybeV.takeError());
return nullptr;
}
return MaybeV.get();
}
// No type specified, must be invalid reference.
@ -143,83 +85,3 @@ Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty,
ValuePtrs[Idx] = {V, TyID};
return V;
}
/// Once all constants are read, this method bulk resolves any forward
/// references. The idea behind this is that we sometimes get constants (such
/// as large arrays) which reference *many* forward ref constants. Replacing
/// each of these causes a lot of thrashing when building/reuniquing the
/// constant. Instead of doing this, we look at all the uses and rewrite all
/// the place holders at once for any constant that uses a placeholder.
void BitcodeReaderValueList::resolveConstantForwardRefs() {
// Sort the values by-pointer so that they are efficient to look up with a
// binary search.
llvm::sort(ResolveConstants);
SmallVector<Constant *, 64> NewOps;
while (!ResolveConstants.empty()) {
Value *RealVal = operator[](ResolveConstants.back().second);
Constant *Placeholder = ResolveConstants.back().first;
ResolveConstants.pop_back();
// Loop over all users of the placeholder, updating them to reference the
// new value. If they reference more than one placeholder, update them all
// at once.
while (!Placeholder->use_empty()) {
auto UI = Placeholder->user_begin();
User *U = *UI;
// If the using object isn't uniqued, just update the operands. This
// handles instructions and initializers for global variables.
if (!isa<Constant>(U) || isa<GlobalValue>(U)) {
UI.getUse().set(RealVal);
continue;
}
// Otherwise, we have a constant that uses the placeholder. Replace that
// constant with a new constant that has *all* placeholder uses updated.
Constant *UserC = cast<Constant>(U);
for (User::op_iterator I = UserC->op_begin(), E = UserC->op_end(); I != E;
++I) {
Value *NewOp;
if (!isa<ConstantPlaceHolder>(*I)) {
// Not a placeholder reference.
NewOp = *I;
} else if (*I == Placeholder) {
// Common case is that it just references this one placeholder.
NewOp = RealVal;
} else {
// Otherwise, look up the placeholder in ResolveConstants.
ResolveConstantsTy::iterator It = llvm::lower_bound(
ResolveConstants,
std::pair<Constant *, unsigned>(cast<Constant>(*I), 0));
assert(It != ResolveConstants.end() && It->first == *I);
NewOp = operator[](It->second);
}
NewOps.push_back(cast<Constant>(NewOp));
}
// Make the new constant. Constants are uniqued, so we cannot mutate
// UserC in place — we build a fresh constant with the resolved operands.
Constant *NewC;
if (ConstantArray *UserCA = dyn_cast<ConstantArray>(UserC)) {
NewC = ConstantArray::get(UserCA->getType(), NewOps);
} else if (ConstantStruct *UserCS = dyn_cast<ConstantStruct>(UserC)) {
NewC = ConstantStruct::get(UserCS->getType(), NewOps);
} else if (isa<ConstantVector>(UserC)) {
NewC = ConstantVector::get(NewOps);
} else {
assert(isa<ConstantExpr>(UserC) && "Must be a ConstantExpr.");
NewC = cast<ConstantExpr>(UserC)->getWithOperands(NewOps);
}
UserC->replaceAllUsesWith(NewC);
UserC->destroyConstant();
NewOps.clear();
}
// Update all ValueHandles, they should be the only users at this point.
Placeholder->replaceAllUsesWith(RealVal);
delete cast<ConstantPlaceHolder>(Placeholder);
}
}

View File

@ -22,7 +22,7 @@ namespace llvm {
class Constant;
class Error;
class LLVMContext;
template <typename T> class Expected;
class Type;
class Value;
@ -30,30 +30,20 @@ class BitcodeReaderValueList {
/// Maps Value ID to pair of Value* and Type ID.
std::vector<std::pair<WeakTrackingVH, unsigned>> ValuePtrs;
/// As we resolve forward-referenced constants, we add information about them
/// to this vector. This allows us to resolve them in bulk instead of
/// resolving each reference at a time. See the code in
/// ResolveConstantForwardRefs for more information about this.
///
/// The key of this vector is the placeholder constant, the value is the slot
/// number that holds the resolved value.
using ResolveConstantsTy = std::vector<std::pair<Constant *, unsigned>>;
ResolveConstantsTy ResolveConstants;
LLVMContext &Context;
/// Maximum number of valid references. Forward references exceeding the
/// maximum must be invalid.
unsigned RefsUpperBound;
public:
BitcodeReaderValueList(LLVMContext &C, size_t RefsUpperBound)
: Context(C),
RefsUpperBound(std::min((size_t)std::numeric_limits<unsigned>::max(),
RefsUpperBound)) {}
using MaterializeValueFnTy =
std::function<Expected<Value *>(unsigned, BasicBlock *)>;
MaterializeValueFnTy MaterializeValueFn;
~BitcodeReaderValueList() {
assert(ResolveConstants.empty() && "Constants not resolved?");
}
public:
BitcodeReaderValueList(size_t RefsUpperBound,
MaterializeValueFnTy MaterializeValueFn)
: RefsUpperBound(std::min((size_t)std::numeric_limits<unsigned>::max(),
RefsUpperBound)),
MaterializeValueFn(MaterializeValueFn) {}
// vector compatibility methods
unsigned size() const { return ValuePtrs.size(); }
@ -65,7 +55,6 @@ public:
}
void clear() {
assert(ResolveConstants.empty() && "Constants not resolved?");
ValuePtrs.clear();
}
@ -90,14 +79,15 @@ public:
ValuePtrs.resize(N);
}
Constant *getConstantFwdRef(unsigned Idx, Type *Ty, unsigned TyID);
Value *getValueFwdRef(unsigned Idx, Type *Ty, unsigned TyID);
void replaceValueWithoutRAUW(unsigned ValNo, Value *NewV) {
assert(ValNo < ValuePtrs.size());
ValuePtrs[ValNo].first = NewV;
}
Value *getValueFwdRef(unsigned Idx, Type *Ty, unsigned TyID,
BasicBlock *ConstExprInsertBB);
Error assignValue(unsigned Idx, Value *V, unsigned TypeID);
/// Once all constants are read, this method bulk resolves any forward
/// references.
void resolveConstantForwardRefs();
};
} // end namespace llvm

View File

@ -50,17 +50,12 @@ namespace {
struct OrderMap {
DenseMap<const Value *, std::pair<unsigned, bool>> IDs;
unsigned LastGlobalConstantID = 0;
unsigned LastGlobalValueID = 0;
OrderMap() = default;
bool isGlobalConstant(unsigned ID) const {
return ID <= LastGlobalConstantID;
}
bool isGlobalValue(unsigned ID) const {
return ID <= LastGlobalValueID && !isGlobalConstant(ID);
return ID <= LastGlobalValueID;
}
unsigned size() const { return IDs.size(); }
@ -84,7 +79,7 @@ static void orderValue(const Value *V, OrderMap &OM) {
return;
if (const Constant *C = dyn_cast<Constant>(V)) {
if (C->getNumOperands() && !isa<GlobalValue>(C)) {
if (C->getNumOperands()) {
for (const Value *Op : C->operands())
if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
orderValue(Op, OM);
@ -104,39 +99,40 @@ static OrderMap orderModule(const Module &M) {
// and ValueEnumerator::incorporateFunction().
OrderMap OM;
// In the reader, initializers of GlobalValues are set *after* all the
// globals have been read. Rather than awkwardly modeling this behaviour
// directly in predictValueUseListOrderImpl(), just assign IDs to
// initializers of GlobalValues before GlobalValues themselves to model this
// implicitly.
for (const GlobalVariable &G : M.globals())
if (G.hasInitializer())
if (!isa<GlobalValue>(G.getInitializer()))
orderValue(G.getInitializer(), OM);
for (const GlobalAlias &A : M.aliases())
if (!isa<GlobalValue>(A.getAliasee()))
orderValue(A.getAliasee(), OM);
for (const GlobalIFunc &I : M.ifuncs())
if (!isa<GlobalValue>(I.getResolver()))
orderValue(I.getResolver(), OM);
for (const Function &F : M) {
for (const Use &U : F.operands())
if (!isa<GlobalValue>(U.get()))
orderValue(U.get(), OM);
}
// Initializers of GlobalValues are processed in
// BitcodeReader::ResolveGlobalAndAliasInits(). Match the order there rather
// than ValueEnumerator, and match the code in predictValueUseListOrderImpl()
// by giving IDs in reverse order.
//
// Since GlobalValues never reference each other directly (just through
// initializers), their relative IDs only matter for determining order of
// uses in their initializers.
for (const GlobalVariable &G : reverse(M.globals()))
orderValue(&G, OM);
for (const GlobalAlias &A : reverse(M.aliases()))
orderValue(&A, OM);
for (const GlobalIFunc &I : reverse(M.ifuncs()))
orderValue(&I, OM);
for (const Function &F : reverse(M))
orderValue(&F, OM);
OM.LastGlobalValueID = OM.size();
// As constants used in metadata operands are emitted as module-level
// constants, we must order them before other operands. Also, we must order
// these before global values, as these will be read before setting the
// global values' initializers. The latter matters for constants which have
// uses towards other constants that are used as initializers.
auto orderConstantValue = [&OM](const Value *V) {
if ((isa<Constant>(V) && !isa<GlobalValue>(V)) || isa<InlineAsm>(V))
if (isa<Constant>(V) || isa<InlineAsm>(V))
orderValue(V, OM);
};
for (const Function &F : M) {
if (F.isDeclaration())
continue;
// Here we need to match the union of ValueEnumerator::incorporateFunction()
// and WriteFunction(). Basic blocks are implicitly declared before
// anything else (by declaring their size).
for (const BasicBlock &BB : F)
orderValue(&BB, OM);
// Metadata used by instructions is decoded before the actual instructions,
// so visit any constants used by it beforehand.
for (const BasicBlock &BB : F)
for (const Instruction &I : BB)
for (const Value *V : I.operands()) {
@ -151,49 +147,17 @@ static OrderMap orderModule(const Module &M) {
}
}
}
}
OM.LastGlobalConstantID = OM.size();
// Initializers of GlobalValues are processed in
// BitcodeReader::ResolveGlobalAndAliasInits(). Match the order there rather
// than ValueEnumerator, and match the code in predictValueUseListOrderImpl()
// by giving IDs in reverse order.
//
// Since GlobalValues never reference each other directly (just through
// initializers), their relative IDs only matter for determining order of
// uses in their initializers.
for (const Function &F : M)
orderValue(&F, OM);
for (const GlobalAlias &A : M.aliases())
orderValue(&A, OM);
for (const GlobalIFunc &I : M.ifuncs())
orderValue(&I, OM);
for (const GlobalVariable &G : M.globals())
orderValue(&G, OM);
OM.LastGlobalValueID = OM.size();
for (const Function &F : M) {
if (F.isDeclaration())
continue;
// Here we need to match the union of ValueEnumerator::incorporateFunction()
// and WriteFunction(). Basic blocks are implicitly declared before
// anything else (by declaring their size).
for (const BasicBlock &BB : F)
orderValue(&BB, OM);
for (const Argument &A : F.args())
orderValue(&A, OM);
for (const BasicBlock &BB : F)
for (const Instruction &I : BB) {
for (const Value *Op : I.operands())
if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
isa<InlineAsm>(*Op))
orderValue(Op, OM);
orderConstantValue(Op);
if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
orderValue(SVI->getShuffleMaskForBitcode(), OM);
}
for (const BasicBlock &BB : F)
for (const Instruction &I : BB)
orderValue(&I, OM);
}
}
return OM;
}
@ -223,18 +187,6 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
auto LID = OM.lookup(LU->getUser()).first;
auto RID = OM.lookup(RU->getUser()).first;
// Global values are processed in reverse order.
//
// Moreover, initializers of GlobalValues are set *after* all the globals
// have been read (despite having earlier IDs). Rather than awkwardly
// modeling this behaviour here, orderModule() has assigned IDs to
// initializers of GlobalValues before GlobalValues themselves.
if (OM.isGlobalValue(LID) && OM.isGlobalValue(RID)) {
if (LID == RID)
return LU->getOperandNo() > RU->getOperandNo();
return LID < RID;
}
// If ID is 4, then expect: 7 6 5 1 2 3.
if (LID < RID) {
if (RID <= ID)
@ -317,16 +269,25 @@ static UseListOrderStack predictUseListOrder(const Module &M) {
predictValueUseListOrder(&A, &F, OM, Stack);
for (const BasicBlock &BB : F)
for (const Instruction &I : BB) {
for (const Value *Op : I.operands())
for (const Value *Op : I.operands()) {
if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues.
predictValueUseListOrder(Op, &F, OM, Stack);
if (const auto *MAV = dyn_cast<MetadataAsValue>(Op)) {
if (const auto *VAM =
dyn_cast<ValueAsMetadata>(MAV->getMetadata())) {
predictValueUseListOrder(VAM->getValue(), &F, OM, Stack);
} else if (const auto *AL =
dyn_cast<DIArgList>(MAV->getMetadata())) {
for (const auto *VAM : AL->getArgs())
predictValueUseListOrder(VAM->getValue(), &F, OM, Stack);
}
}
}
if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
predictValueUseListOrder(SVI->getShuffleMaskForBitcode(), &F, OM,
Stack);
}
for (const BasicBlock &BB : F)
for (const Instruction &I : BB)
predictValueUseListOrder(&I, &F, OM, Stack);
}
}
// Visit globals last, since the module-level use-list block will be seen

View File

@ -0,0 +1,224 @@
; RUN: llvm-as < %s | llvm-dis -expand-constant-exprs | FileCheck %s
@g = extern_weak global i32
@g2 = extern_weak global i32
; A ptrtoint constant expression in a return is expanded into a ptrtoint
; instruction under -expand-constant-exprs.
define i64 @test_cast() {
; CHECK-LABEL: define i64 @test_cast() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: ret i64 %constexpr
ret i64 ptrtoint (ptr @g to i64)
}
; A nested icmp constant expression expands into two instructions: the inner
; ptrtoint followed by the icmp.
define i1 @test_icmp() {
; CHECK-LABEL: define i1 @test_icmp() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr1 = icmp ne i64 %constexpr, 0
; CHECK-NEXT: ret i1 %constexpr1
ret i1 icmp ne (i64 ptrtoint (ptr @g to i64), i64 0)
}
; A select constant expression expands into a chain of three instructions,
; innermost expression first.
define i32 @test_select() {
; CHECK-LABEL: define i32 @test_select() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr1 = icmp ne i64 %constexpr, 0
; CHECK-NEXT: %constexpr2 = select i1 %constexpr1, i32 1, i32 2
; CHECK-NEXT: ret i32 %constexpr2
ret i32 select (i1 icmp ne (i64 ptrtoint (ptr @g to i64), i64 0), i32 1, i32 2)
}
; An extractelement constant expression with nested operands expands into the
; full instruction chain ending in extractelement.
define i8 @test_extractelement() {
; CHECK-LABEL: define i8 @test_extractelement() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr1 = icmp ne i64 %constexpr, 0
; CHECK-NEXT: %constexpr2 = select i1 %constexpr1, <2 x i8> zeroinitializer, <2 x i8> <i8 0, i8 1>
; CHECK-NEXT: %constexpr3 = extractelement <2 x i8> %constexpr2, i32 0
; CHECK-NEXT: ret i8 %constexpr3
ret i8 extractelement (<2 x i8> select (i1 icmp ne (i64 ptrtoint (ptr @g to i64), i64 0), <2 x i8> zeroinitializer, <2 x i8> <i8 0, i8 1>), i32 0)
}
; An insertelement constant expression with a constexpr index operand is
; expanded into instructions.
define <2 x i8> @test_insertelement() {
; CHECK-LABEL: define <2 x i8> @test_insertelement() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i32
; CHECK-NEXT: %constexpr1 = insertelement <2 x i8> poison, i8 42, i32 %constexpr
; CHECK-NEXT: ret <2 x i8> %constexpr1
ret <2 x i8> insertelement (<2 x i8> poison, i8 42, i32 ptrtoint (ptr @g to i32))
}
; A unary fneg constant expression expands, including its nested bitcast and
; ptrtoint operands.
define double @test_fneg() {
; CHECK-LABEL: define double @test_fneg() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr1 = bitcast i64 %constexpr to double
; CHECK-NEXT: %constexpr2 = fneg double %constexpr1
ret double fneg (double bitcast (i64 ptrtoint (ptr @g to i64) to double))
}
; Operator flags (here nuw) are preserved on the expanded instruction.
define i64 @test_flags() {
; CHECK-LABEL: define i64 @test_flags() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr1 = add nuw i64 %constexpr, 1
; CHECK-NEXT: ret i64 %constexpr1
ret i64 add nuw (i64 ptrtoint (ptr @g to i64), i64 1)
}
; A vector constant containing a constexpr element is rebuilt with a chain of
; insertelement instructions (%constexpr.ins*).
define <3 x i64> @test_vector() {
; CHECK-LABEL: define <3 x i64> @test_vector() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr.ins = insertelement <3 x i64> poison, i64 5, i32 0
; CHECK-NEXT: %constexpr.ins1 = insertelement <3 x i64> %constexpr.ins, i64 %constexpr, i32 1
; CHECK-NEXT: %constexpr.ins2 = insertelement <3 x i64> %constexpr.ins1, i64 7, i32 2
ret <3 x i64> <i64 5, i64 ptrtoint (ptr @g to i64), i64 7>
}
; The same constexpr used twice as an operand is expanded only once and the
; resulting instruction is reused for both operands.
define i64 @test_reused_expr() {
; CHECK-LABEL: define i64 @test_reused_expr() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr1 = add i64 %constexpr, %constexpr
; CHECK-NEXT: ret i64 %constexpr1
ret i64 add (i64 ptrtoint (ptr @g to i64), i64 ptrtoint (ptr @g to i64))
}
; Two distinct constexpr operands of one expression are each expanded before
; the combining add.
define i64 @test_multiple_expanded_operands() {
; CHECK-LABEL: define i64 @test_multiple_expanded_operands() {
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr1 = ptrtoint ptr @g2 to i64
; CHECK-NEXT: %constexpr2 = add i64 %constexpr, %constexpr1
; CHECK-NEXT: ret i64 %constexpr2
ret i64 add (i64 ptrtoint (ptr @g to i64), i64 ptrtoint (ptr @g2 to i64))
}
; When the use is mid-block, the expanded instruction is inserted immediately
; before the user, after existing instructions.
define i64 @test_mid_block(i64 %arg) {
; CHECK-LABEL: define i64 @test_mid_block(i64 %arg) {
; CHECK-NEXT: %x = mul i64 %arg, 3
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %add = add i64 %x, %constexpr
; CHECK-NEXT: ret i64 %add
%x = mul i64 %arg, 3
%add = add i64 %x, ptrtoint (ptr @g to i64)
ret i64 %add
}
; A constexpr phi operand is expanded into a new %phi.constexpr block spliced
; on the incoming edge; the phi's predecessor is rewritten accordingly.
; Predecessor block appears before the phi block in this variant.
define i64 @test_phi_non_critical_edge_block_before(i1 %c) {
; CHECK-LABEL: define i64 @test_phi_non_critical_edge_block_before(i1 %c) {
; CHECK: entry:
; CHECK-NEXT: br i1 %c, label %if, label %join
; CHECK: if:
; CHECK-NEXT: br label %phi.constexpr
; CHECK: phi.constexpr:
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: br label %join
; CHECK: join:
; CHECK-NEXT: %phi = phi i64 [ 0, %entry ], [ %constexpr, %phi.constexpr ]
; CHECK-NEXT: ret i64 %phi
entry:
br i1 %c, label %if, label %join
if:
br label %join
join:
%phi = phi i64 [ 0, %entry ], [ ptrtoint (ptr @g to i64), %if ]
ret i64 %phi
}
; Same as the previous test, but the predecessor block appears after the phi
; block, verifying block order does not affect the rewiring.
define i64 @test_phi_non_critical_edge_block_after(i1 %c) {
; CHECK-LABEL: define i64 @test_phi_non_critical_edge_block_after(i1 %c) {
; CHECK: entry:
; CHECK-NEXT: br i1 %c, label %if, label %join
; CHECK: phi.constexpr:
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: br label %join
; CHECK: join:
; CHECK-NEXT: %phi = phi i64 [ 0, %entry ], [ %constexpr, %phi.constexpr ]
; CHECK-NEXT: ret i64 %phi
; CHECK: if:
; CHECK-NEXT: br label %phi.constexpr
entry:
br i1 %c, label %if, label %join
join:
%phi = phi i64 [ 0, %entry ], [ ptrtoint (ptr @g to i64), %if ]
ret i64 %phi
if:
br label %join
}
; On a critical edge (entry has two successors, join has two predecessors)
; the %phi.constexpr block effectively splits the edge.
define i64 @test_phi_critical_edge(i1 %c) {
; CHECK-LABEL: define i64 @test_phi_critical_edge(i1 %c) {
; CHECK: entry:
; CHECK-NEXT: br i1 %c, label %if, label %phi.constexpr
; CHECK: if:
; CHECK-NEXT: br label %join
; CHECK: phi.constexpr:
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: br label %join
; CHECK: join:
; CHECK-NEXT: %phi = phi i64 [ %constexpr, %phi.constexpr ], [ 0, %if ]
; CHECK-NEXT: ret i64 %phi
entry:
br i1 %c, label %if, label %join
if:
br label %join
join:
%phi = phi i64 [ ptrtoint (ptr @g to i64), %entry ], [ 0, %if ]
ret i64 %phi
}
; Multiple phi nodes with constexpr operands on the same incoming edge share
; a single %phi.constexpr block holding both expansions.
define i64 @test_phi_multiple_nodes(i1 %c) {
; CHECK-LABEL: define i64 @test_phi_multiple_nodes(i1 %c) {
; CHECK: entry:
; CHECK-NEXT: br i1 %c, label %if, label %join
; CHECK: if:
; CHECK-NEXT: br label %phi.constexpr
; CHECK: phi.constexpr:
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: %constexpr2 = ptrtoint ptr @g2 to i64
; CHECK-NEXT: br label %join
; CHECK: join:
; CHECK-NEXT: %phi = phi i64 [ 0, %entry ], [ %constexpr, %phi.constexpr ]
; CHECK-NEXT: %phi2 = phi i64 [ 0, %entry ], [ %constexpr2, %phi.constexpr ]
; CHECK-NEXT: ret i64 %phi
entry:
br i1 %c, label %if, label %join
if:
br label %join
join:
%phi = phi i64 [ 0, %entry ], [ ptrtoint (ptr @g to i64), %if ]
%phi2 = phi i64 [ 0, %entry ], [ ptrtoint (ptr @g2 to i64), %if ]
ret i64 %phi
}
; When the same predecessor appears multiple times (switch with two cases to
; the same target), both incoming entries are redirected to one shared
; %phi.constexpr block.
define i64 @test_phi_multiple_identical_predecessors(i32 %x) {
; CHECK-LABEL: define i64 @test_phi_multiple_identical_predecessors(i32 %x) {
; CHECK: entry:
; CHECK-NEXT: switch i32 %x, label %default [
; CHECK-NEXT: i32 0, label %phi.constexpr
; CHECK-NEXT: i32 1, label %phi.constexpr
; CHECK-NEXT: ]
; CHECK: default:
; CHECK-NEXT: br label %join
; CHECK: phi.constexpr:
; CHECK-NEXT: %constexpr = ptrtoint ptr @g to i64
; CHECK-NEXT: br label %join
; CHECK: join:
; CHECK-NEXT: %phi = phi i64 [ %constexpr, %phi.constexpr ], [ %constexpr, %phi.constexpr ], [ 0, %default ]
; CHECK-NEXT: ret i64 %phi
entry:
switch i32 %x, label %default [
i32 0, label %join
i32 1, label %join
]
default:
br label %join
join:
%phi = phi i64 [ ptrtoint (ptr @g to i64), %entry ], [ ptrtoint (ptr @g to i64), %entry ], [ 0, %default ]
ret i64 %phi
}