//===- InstCombineAtomicRMW.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for atomic rmw instructions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

namespace {
|
|
|
|
/// Return true if and only if the given instruction does not modify the memory
|
|
|
|
/// location referenced. Note that an idemptent atomicrmw may still have
|
|
|
|
/// ordering effects on nearby instructions, or be volatile.
|
|
|
|
/// TODO: Common w/ the version in AtomicExpandPass, and change the term used.
|
|
|
|
/// Idemptotent is confusing in this context.
|
|
|
|
bool isIdempotentRMW(AtomicRMWInst& RMWI) {
|
2019-03-02 02:00:07 +08:00
|
|
|
if (auto CF = dyn_cast<ConstantFP>(RMWI.getValOperand()))
|
|
|
|
switch(RMWI.getOperation()) {
|
|
|
|
case AtomicRMWInst::FAdd: // -0.0
|
|
|
|
return CF->isZero() && CF->isNegative();
|
|
|
|
case AtomicRMWInst::FSub: // +0.0
|
|
|
|
return CF->isZero() && !CF->isNegative();
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
};
|
|
|
|
|
2019-02-15 02:39:14 +08:00
|
|
|
auto C = dyn_cast<ConstantInt>(RMWI.getValOperand());
|
|
|
|
if(!C)
|
|
|
|
return false;
|
|
|
|
|
2019-02-16 05:31:39 +08:00
|
|
|
switch(RMWI.getOperation()) {
|
2019-02-15 02:39:14 +08:00
|
|
|
case AtomicRMWInst::Add:
|
|
|
|
case AtomicRMWInst::Sub:
|
|
|
|
case AtomicRMWInst::Or:
|
|
|
|
case AtomicRMWInst::Xor:
|
|
|
|
return C->isZero();
|
|
|
|
case AtomicRMWInst::And:
|
|
|
|
return C->isMinusOne();
|
|
|
|
case AtomicRMWInst::Min:
|
|
|
|
return C->isMaxValue(true);
|
|
|
|
case AtomicRMWInst::Max:
|
|
|
|
return C->isMinValue(true);
|
|
|
|
case AtomicRMWInst::UMin:
|
|
|
|
return C->isMaxValue(false);
|
|
|
|
case AtomicRMWInst::UMax:
|
|
|
|
return C->isMinValue(false);
|
|
|
|
default:
|
|
|
|
return false;
|
2019-02-08 05:27:23 +08:00
|
|
|
}
|
2019-02-15 02:39:14 +08:00
|
|
|
}
|
|
|
|
|
2019-02-16 05:23:51 +08:00
|
|
|
/// Return true if the given instruction always produces a value in memory
|
2019-03-02 03:50:36 +08:00
|
|
|
/// equivalent to its value operand.
|
2019-02-16 05:23:51 +08:00
|
|
|
bool isSaturating(AtomicRMWInst& RMWI) {
|
2019-03-02 03:50:36 +08:00
|
|
|
if (auto CF = dyn_cast<ConstantFP>(RMWI.getValOperand()))
|
|
|
|
switch(RMWI.getOperation()) {
|
|
|
|
case AtomicRMWInst::FAdd:
|
|
|
|
case AtomicRMWInst::FSub:
|
|
|
|
return CF->isNaN();
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
};
|
|
|
|
|
2019-02-16 05:23:51 +08:00
|
|
|
auto C = dyn_cast<ConstantInt>(RMWI.getValOperand());
|
|
|
|
if(!C)
|
|
|
|
return false;
|
|
|
|
|
2019-02-16 05:31:39 +08:00
|
|
|
switch(RMWI.getOperation()) {
|
2019-02-16 05:23:51 +08:00
|
|
|
default:
|
|
|
|
return false;
|
2019-02-16 05:31:39 +08:00
|
|
|
case AtomicRMWInst::Xchg:
|
|
|
|
return true;
|
2019-02-16 05:23:51 +08:00
|
|
|
case AtomicRMWInst::Or:
|
|
|
|
return C->isAllOnesValue();
|
|
|
|
case AtomicRMWInst::And:
|
|
|
|
return C->isZero();
|
|
|
|
case AtomicRMWInst::Min:
|
|
|
|
return C->isMinValue(true);
|
|
|
|
case AtomicRMWInst::Max:
|
|
|
|
return C->isMaxValue(true);
|
|
|
|
case AtomicRMWInst::UMin:
|
|
|
|
return C->isMinValue(false);
|
|
|
|
case AtomicRMWInst::UMax:
|
|
|
|
return C->isMaxValue(false);
|
|
|
|
};
|
|
|
|
}
} // namespace

/// Simplify or canonicalize an atomicrmw instruction.  Returns the (possibly
/// mutated) instruction when a change was made, or nullptr when no transform
/// applies.
Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {

  // Volatile RMWs perform a load and a store, we cannot replace this by just a
  // load or just a store. We chose not to canonicalize out of general paranoia
  // about user expectations around volatile.
  if (RMWI.isVolatile())
    return nullptr;

  // Any atomicrmw op which produces a known result in memory can be
  // replaced w/an atomicrmw xchg.
  if (isSaturating(RMWI) &&
      RMWI.getOperation() != AtomicRMWInst::Xchg) {
    // Mutate in place and return the instruction to signal a change was made.
    RMWI.setOperation(AtomicRMWInst::Xchg);
    return &RMWI;
  }

  AtomicOrdering Ordering = RMWI.getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic &&
         Ordering != AtomicOrdering::Unordered &&
         "AtomicRMWs don't make sense with Unordered or NotAtomic");

  // Any atomicrmw xchg with no uses can be converted to a atomic store if the
  // ordering is compatible.
  if (RMWI.getOperation() == AtomicRMWInst::Xchg &&
      RMWI.use_empty()) {
    // Only release/monotonic orderings map directly onto an atomic store;
    // stronger orderings (acq_rel, seq_cst) also constrain the load side.
    if (Ordering != AtomicOrdering::Release &&
        Ordering != AtomicOrdering::Monotonic)
      return nullptr;
    auto *SI = new StoreInst(RMWI.getValOperand(),
                             RMWI.getPointerOperand(), &RMWI);
    SI->setAtomic(Ordering, RMWI.getSyncScopeID());
    // Atomic ops require natural (ABI) alignment; make it explicit on the
    // replacement store.
    SI->setAlignment(MaybeAlign(DL.getABITypeAlignment(RMWI.getType())));
    return eraseInstFromFunction(RMWI);
  }

  // Everything below only applies to ops that don't modify memory.
  if (!isIdempotentRMW(RMWI))
    return nullptr;

  // We chose to canonicalize all idempotent operations to an single
  // operation code and constant.  This makes it easier for the rest of the
  // optimizer to match easily.  The choices of or w/0 and fadd w/-0.0 are
  // arbitrary.
  if (RMWI.getType()->isIntegerTy() &&
      RMWI.getOperation() != AtomicRMWInst::Or) {
    RMWI.setOperation(AtomicRMWInst::Or);
    return replaceOperand(RMWI, 1, ConstantInt::get(RMWI.getType(), 0));
  } else if (RMWI.getType()->isFloatingPointTy() &&
             RMWI.getOperation() != AtomicRMWInst::FAdd) {
    RMWI.setOperation(AtomicRMWInst::FAdd);
    return replaceOperand(RMWI, 1, ConstantFP::getNegativeZero(RMWI.getType()));
  }

  // Check if the required ordering is compatible with an atomic load.
  if (Ordering != AtomicOrdering::Acquire &&
      Ordering != AtomicOrdering::Monotonic)
    return nullptr;

  // An idempotent RMW never changes memory, so replace it with a plain atomic
  // load carrying the same ordering, sync scope, and natural alignment.
  LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand());
  Load->setAtomic(Ordering, RMWI.getSyncScopeID());
  Load->setAlignment(MaybeAlign(DL.getABITypeAlignment(RMWI.getType())));
  return Load;
}
|