//===- AtomicExpandPass.cpp - Expand atomic instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target-specific instructions which implement
// the same semantics in a way which better fits the target backend. This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//
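
// For illustration only (not part of the pass): on a hypothetical target
// whose maximum supported atomic width is 64 bits, an oversized atomic store
// is rewritten into a generic __atomic_* libcall, roughly as below (the
// signature and the memory-order encoding are simplified here):
//
//   store atomic i128 %v, ptr %p seq_cst, align 16
//     -->
//   %tmp = alloca i128
//   store i128 %v, ptr %tmp
//   call void @__atomic_store(i64 16, ptr %p, ptr %tmp, i32 5)
//
// Supported sizes instead stay as native IR atomics (possibly after being
// coerced to an integer type) and are lowered by the backend.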
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {

class AtomicExpand : public FunctionPass {
  const TargetLowering *TLI = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  AtomicExpand() : FunctionPass(ID) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  void expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);
  Value *
  insertRMWLLSCLoop(IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
                    Align AddrAlign, AtomicOrdering MemOpOrder,
                    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void
  expandAtomicOpToLLSC(Instruction *I, Type *ResultTy, Value *Addr,
                       Align AddrAlign, AtomicOrdering MemOpOrder,
                       function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
  bool expandPartwordCmpXchg(AtomicCmpXchgInst *I);
  void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);

  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  static Value *
  insertRMWCmpXchgLoop(IRBuilder<> &Builder, Type *ResultType, Value *Addr,
                       Align AddrAlign, AtomicOrdering MemOpOrder,
                       SyncScope::ID SSID,
                       function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
                       CreateCmpXchgInstFun CreateCmpXchg);
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);

  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *RMWI);
  bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);

  bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, Align Alignment,
                               Value *PointerOperand, Value *ValueOperand,
                               Value *CASExpected, AtomicOrdering Ordering,
                               AtomicOrdering Ordering2,
                               ArrayRef<RTLIB::Libcall> Libcalls);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
  void expandAtomicRMWToLibcall(AtomicRMWInst *I);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

  friend bool
  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                 CreateCmpXchgInstFun CreateCmpXchg);
};

} // end anonymous namespace

char AtomicExpand::ID = 0;

char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
                false)

FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }

// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering (as opposed to being turned into a __atomic libcall).
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
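
// Worked example (illustrative, not part of the pass): on a target reporting
// getMaxAtomicSizeInBitsSupported() == 64, a naturally aligned
// `load atomic i64, align 8` passes both checks and stays an IR atomic,
// while an under-aligned `load atomic i64, align 4` or any i128 access fails
// the check and is routed to the __atomic_* libcall path below.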

bool AtomicExpand::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<TargetMachine>();
  if (!TM.getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM.getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (Instruction &I : instructions(F))
    if (I.isAtomic() && !isa<FenceInst>(&I))
      AtomicInsts.push_back(&I);

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the Size/Alignment is not supported, replace with a libcall.
    if (LI) {
      if (!atomicSizeSupported(TLI, LI)) {
        expandAtomicLoadToLibcall(LI);
        MadeChange = true;
        continue;
      }
    } else if (SI) {
      if (!atomicSizeSupported(TLI, SI)) {
        expandAtomicStoreToLibcall(SI);
        MadeChange = true;
        continue;
      }
    } else if (RMWI) {
      if (!atomicSizeSupported(TLI, RMWI)) {
        expandAtomicRMWToLibcall(RMWI);
        MadeChange = true;
        continue;
      }
    } else if (CASI) {
      if (!atomicSizeSupported(TLI, CASI)) {
        expandAtomicCASToLibcall(CASI);
        MadeChange = true;
        continue;
      }
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
      } else if (CASI &&
                 TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                     TargetLoweringBase::AtomicExpansionKind::None &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getFailureOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getMergedOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
      }

      if (FenceOrdering != AtomicOrdering::Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering);
      }
    }

    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI)) {
        expandAtomicStore(SI);
        MadeChange = true;
      }
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // we try them in that order.
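      //
      // Illustrative example (not from the source): `atomicrmw or ptr %p,
      // i32 0 seq_cst` never changes the stored value, so the idempotent path
      // can rewrite it as an appropriately fenced atomic load, whereas a
      // non-idempotent op such as `atomicrmw add ptr %p, i32 1` goes through
      // the cmpxchg or LL/SC loop produced by tryExpandAtomicRMW.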

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        AtomicRMWInst::BinOp Op = RMWI->getOperation();
        if (Op == AtomicRMWInst::Xchg &&
            RMWI->getValOperand()->getType()->isFloatingPointTy()) {
          // TODO: add a TLI hook to control this so that each target can
          // convert to lowering the original type one at a time.
          RMWI = convertAtomicXchgToIntegerType(RMWI);
          assert(RMWI->getValOperand()->getType()->isIntegerTy() &&
                 "invariant broken");
          MadeChange = true;
        }
        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
        unsigned ValueSize = getAtomicOpSize(RMWI);
        if (ValueSize < MinCASSize &&
            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
             Op == AtomicRMWInst::And)) {
          RMWI = widenPartwordAtomicRMW(RMWI);
          MadeChange = true;
        }

        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToInteger for floating point too.
      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
             "unimplemented - floating point not legal at IR level");
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        CASI = convertCmpXchgToIntegerType(CASI);
        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}
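
// Illustrative sketch (not from the source): for a target that opts into
// shouldInsertFencesForAtomic, runOnFunction has already downgraded e.g. a
// seq_cst store to monotonic, and this helper brackets it with explicit
// fences, conceptually:
//
//   fence seq_cst                          ; leading fence
//   store atomic i32 %v, ptr %p monotonic
//   fence seq_cst                          ; trailing fence, moved after I
//
// Which fences (if any) are actually emitted is decided by the target's
// emitLeadingFence/emitTrailingFence hooks.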

/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getMemValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
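
// Illustrative sketch (not from the source): the conversion above rewrites a
// floating-point atomic load into the integer/bitcast idiom, conceptually:
//
//   %f = load atomic float, ptr %p acquire, align 4
//     -->
//   %i = load atomic i32, ptr %p acquire, align 4
//   %f = bitcast i32 %i to float
//
// Ordering, alignment, volatility and sync scope are carried over unchanged.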

AtomicRMWInst *
AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  IRBuilder<> Builder(RMWI);

  Value *Addr = RMWI->getPointerOperand();
  Value *Val = RMWI->getValOperand();
  Type *PT = PointerType::get(NewTy, RMWI->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);
  Value *NewVal = Builder.CreateBitCast(Val, NewTy);

  auto *NewRMWI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, NewAddr, NewVal,
                              RMWI->getAlign(), RMWI->getOrdering());
  NewRMWI->setVolatile(RMWI->isVolatile());
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}

bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}
|
|
|
|
|
|
|
|
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
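  // Lower the load by issuing a cmpxchg of the location against a dummy
  // (zero) value: whether the exchange succeeds or fails, memory is left
  // unchanged and the returned "loaded" field holds the current contents,
  // which is all we need here.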
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
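///
/// For illustration (a sketch only; the names and types are not taken from a
/// particular test case), a store such as
///   store atomic float %f, float* %p release, align 4
/// is rewritten to roughly
///   %f.int = bitcast float %f to i32
///   %p.int = bitcast float* %p to i32*
///   store atomic i32 %f.int, i32* %p.int release, align 4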
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

void AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
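  //
  // For example (an illustrative sketch rather than output from a specific
  // target), an oversized store such as
  //   store atomic i64 %v, i64* %p seq_cst, align 8
  // becomes
  //   atomicrmw xchg i64* %p, i64 %v seq_cst
  // which is then expanded by tryExpandAtomicRMW below.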
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  tryExpandAtomicRMW(AI);
}

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP types.
  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    unsigned AS = Addr->getType()->getPointerAddressSpace();
    Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
|
|
|
|
/// returning the new value.
|
|
|
|
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
|
|
|
|
Value *Loaded, Value *Inc) {
|
|
|
|
Value *NewVal;
|
|
|
|
switch (Op) {
|
|
|
|
case AtomicRMWInst::Xchg:
|
|
|
|
return Inc;
|
|
|
|
case AtomicRMWInst::Add:
|
|
|
|
return Builder.CreateAdd(Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::Sub:
|
|
|
|
return Builder.CreateSub(Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::And:
|
|
|
|
return Builder.CreateAnd(Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::Nand:
|
|
|
|
return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
|
|
|
|
case AtomicRMWInst::Or:
|
|
|
|
return Builder.CreateOr(Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::Xor:
|
|
|
|
return Builder.CreateXor(Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::Max:
|
|
|
|
NewVal = Builder.CreateICmpSGT(Loaded, Inc);
|
|
|
|
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::Min:
|
|
|
|
NewVal = Builder.CreateICmpSLE(Loaded, Inc);
|
|
|
|
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::UMax:
|
|
|
|
NewVal = Builder.CreateICmpUGT(Loaded, Inc);
|
|
|
|
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::UMin:
|
|
|
|
NewVal = Builder.CreateICmpULE(Loaded, Inc);
|
|
|
|
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
|
2019-01-23 02:18:02 +08:00
|
|
|
case AtomicRMWInst::FAdd:
|
|
|
|
return Builder.CreateFAdd(Loaded, Inc, "new");
|
|
|
|
case AtomicRMWInst::FSub:
|
|
|
|
return Builder.CreateFSub(Loaded, Inc, "new");
|
2014-09-17 08:06:58 +08:00
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown atomic op");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-03 02:12:57 +08:00
|
|
|
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
|
2021-08-17 04:56:01 +08:00
|
|
|
LLVMContext &Ctx = AI->getModule()->getContext();
|
|
|
|
TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
|
|
|
|
switch (Kind) {
|
2015-12-03 02:12:57 +08:00
|
|
|
case TargetLoweringBase::AtomicExpansionKind::None:
|
|
|
|
return false;
|
2016-06-18 02:11:48 +08:00
|
|
|
case TargetLoweringBase::AtomicExpansionKind::LLSC: {
|
|
|
|
unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
|
|
|
|
unsigned ValueSize = getAtomicOpSize(AI);
|
|
|
|
if (ValueSize < MinCASSize) {
|
2020-03-24 01:47:32 +08:00
|
|
|
expandPartwordAtomicRMW(AI,
|
|
|
|
TargetLoweringBase::AtomicExpansionKind::LLSC);
|
2016-06-18 02:11:48 +08:00
|
|
|
} else {
|
|
|
|
auto PerformOp = [&](IRBuilder<> &Builder, Value *Loaded) {
|
|
|
|
return performAtomicOp(AI->getOperation(), Builder, Loaded,
|
|
|
|
AI->getValOperand());
|
|
|
|
};
|
|
|
|
expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
|
2021-02-09 12:07:12 +08:00
|
|
|
AI->getAlign(), AI->getOrdering(), PerformOp);
|
2016-06-18 02:11:48 +08:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
|
|
|
|
unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
|
|
|
|
unsigned ValueSize = getAtomicOpSize(AI);
|
|
|
|
if (ValueSize < MinCASSize) {
|
2019-06-11 09:35:00 +08:00
|
|
|
// TODO: Handle atomicrmw fadd/fsub
|
|
|
|
if (AI->getType()->isFloatingPointTy())
|
|
|
|
return false;
|
|
|
|
|
2016-06-18 02:11:48 +08:00
|
|
|
expandPartwordAtomicRMW(AI,
|
|
|
|
TargetLoweringBase::AtomicExpansionKind::CmpXChg);
|
|
|
|
} else {
|
2021-08-17 04:56:01 +08:00
|
|
|
SmallVector<StringRef> SSNs;
|
|
|
|
Ctx.getSyncScopeNames(SSNs);
|
|
|
|
auto MemScope = SSNs[AI->getSyncScopeID()].empty()
|
|
|
|
? "system"
|
|
|
|
: SSNs[AI->getSyncScopeID()];
|
|
|
|
OptimizationRemarkEmitter ORE(AI->getFunction());
|
|
|
|
ORE.emit([&]() {
|
2021-08-20 10:50:36 +08:00
|
|
|
return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
|
2021-08-17 04:56:01 +08:00
|
|
|
<< "A compare and swap loop was generated for an atomic "
|
|
|
|
<< AI->getOperationName(AI->getOperation()) << " operation at "
|
|
|
|
<< MemScope << " memory scope";
|
|
|
|
});
|
2016-06-18 02:11:48 +08:00
|
|
|
expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2018-09-19 18:54:22 +08:00
|
|
|
case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
|
|
|
|
expandAtomicRMWToMaskedIntrinsic(AI);
|
|
|
|
return true;
|
|
|
|
}
|
2022-03-01 09:56:49 +08:00
|
|
|
case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic: {
|
|
|
|
TLI->emitBitTestAtomicRMWIntrinsic(AI);
|
|
|
|
return true;
|
|
|
|
}
|
2015-12-03 02:12:57 +08:00
|
|
|
default:
|
|
|
|
llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-18 02:11:48 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
struct PartwordMaskValues {
|
2020-03-24 01:47:32 +08:00
|
|
|
// These three fields are guaranteed to be set by createMaskInstrs.
|
|
|
|
Type *WordType = nullptr;
|
|
|
|
Type *ValueType = nullptr;
|
|
|
|
Value *AlignedAddr = nullptr;
|
2021-02-09 12:07:12 +08:00
|
|
|
Align AlignedAddrAlignment;
|
2020-03-24 01:47:32 +08:00
|
|
|
// The remaining fields can be null.
|
|
|
|
Value *ShiftAmt = nullptr;
|
|
|
|
Value *Mask = nullptr;
|
|
|
|
Value *Inv_Mask = nullptr;
|
2016-06-18 02:11:48 +08:00
|
|
|
};
|
2017-09-23 07:46:57 +08:00
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
LLVM_ATTRIBUTE_UNUSED
|
|
|
|
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
|
|
|
|
auto PrintObj = [&O](auto *V) {
|
|
|
|
if (V)
|
|
|
|
O << *V;
|
|
|
|
else
|
|
|
|
O << "nullptr";
|
|
|
|
O << '\n';
|
|
|
|
};
|
|
|
|
O << "PartwordMaskValues {\n";
|
|
|
|
O << " WordType: ";
|
|
|
|
PrintObj(PMV.WordType);
|
|
|
|
O << " ValueType: ";
|
|
|
|
PrintObj(PMV.ValueType);
|
|
|
|
O << " AlignedAddr: ";
|
|
|
|
PrintObj(PMV.AlignedAddr);
|
2021-02-09 12:07:12 +08:00
|
|
|
O << " AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
|
2020-03-24 01:47:32 +08:00
|
|
|
O << " ShiftAmt: ";
|
|
|
|
PrintObj(PMV.ShiftAmt);
|
|
|
|
O << " Mask: ";
|
|
|
|
PrintObj(PMV.Mask);
|
|
|
|
O << " Inv_Mask: ";
|
|
|
|
PrintObj(PMV.Inv_Mask);
|
|
|
|
O << "}\n";
|
|
|
|
return O;
|
|
|
|
}
|
|
|
|
|
2016-06-18 02:11:48 +08:00
|
|
|
} // end anonymous namespace
|
|
|
|
|
|
|
|
/// This is a helper function which builds instructions to provide
|
|
|
|
/// values necessary for partword atomic operations. It takes an
|
|
|
|
/// incoming address, Addr, and ValueType, and constructs the address,
|
|
|
|
/// shift-amounts and masks needed to work with a larger value of size
|
|
|
|
/// WordSize.
|
|
|
|
///
|
|
|
|
/// AlignedAddr: Addr rounded down to a multiple of WordSize
|
|
|
|
///
|
|
|
|
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
|
|
|
|
/// from AlignAddr for it to have the same value as if
|
|
|
|
/// ValueType was loaded from Addr.
|
|
|
|
///
|
|
|
|
/// Mask: Value to mask with the value loaded from AlignAddr to
|
|
|
|
/// include only the part that would've been loaded from Addr.
|
|
|
|
///
|
|
|
|
/// Inv_Mask: The inverse of Mask.
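///
/// For example (illustrative only, assuming a 4-byte minimum cmpxchg width
/// and a little-endian layout): for an i8 value whose address is Word + 1,
/// AlignedAddr is Word, ShiftAmt is 8, Mask is 0xFF00 and Inv_Mask is
/// 0xFFFF00FF when viewed as an i32.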
|
|
|
|
static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
|
|
|
|
Type *ValueType, Value *Addr,
|
2021-02-09 12:07:12 +08:00
|
|
|
Align AddrAlign,
|
2020-03-24 01:47:32 +08:00
|
|
|
unsigned MinWordSize) {
|
|
|
|
PartwordMaskValues PMV;
|
2016-06-18 02:11:48 +08:00
|
|
|
|
|
|
|
Module *M = I->getModule();
|
2020-03-24 01:47:32 +08:00
|
|
|
LLVMContext &Ctx = M->getContext();
|
2016-06-18 02:11:48 +08:00
|
|
|
const DataLayout &DL = M->getDataLayout();
|
|
|
|
unsigned ValueSize = DL.getTypeStoreSize(ValueType);
|
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
PMV.ValueType = ValueType;
|
|
|
|
PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
|
|
|
|
: ValueType;
|
|
|
|
if (PMV.ValueType == PMV.WordType) {
|
|
|
|
PMV.AlignedAddr = Addr;
|
2021-02-09 12:07:12 +08:00
|
|
|
PMV.AlignedAddrAlignment = AddrAlign;
|
2021-07-15 08:49:42 +08:00
|
|
|
PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
|
2022-03-18 13:23:38 +08:00
|
|
|
PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, /*isSigned*/ true);
|
2020-03-24 01:47:32 +08:00
|
|
|
return PMV;
|
|
|
|
}
|
2016-06-18 02:11:48 +08:00
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
assert(ValueSize < MinWordSize);
|
2016-06-18 02:11:48 +08:00
|
|
|
|
|
|
|
Type *WordPtrType =
|
2020-03-24 01:47:32 +08:00
|
|
|
PMV.WordType->getPointerTo(Addr->getType()->getPointerAddressSpace());
|
2016-06-18 02:11:48 +08:00
|
|
|
|
2021-02-09 12:07:12 +08:00
|
|
|
// TODO: we could skip some of this if AddrAlign >= MinWordSize.
|
2016-06-18 02:11:48 +08:00
|
|
|
Value *AddrInt = Builder.CreatePtrToInt(Addr, DL.getIntPtrType(Ctx));
|
2020-03-24 01:47:32 +08:00
|
|
|
PMV.AlignedAddr = Builder.CreateIntToPtr(
|
|
|
|
Builder.CreateAnd(AddrInt, ~(uint64_t)(MinWordSize - 1)), WordPtrType,
|
2016-06-18 02:11:48 +08:00
|
|
|
"AlignedAddr");
|
2021-02-09 12:07:12 +08:00
|
|
|
PMV.AlignedAddrAlignment = Align(MinWordSize);
|
2016-06-18 02:11:48 +08:00
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
|
2016-06-18 02:11:48 +08:00
|
|
|
if (DL.isLittleEndian()) {
|
|
|
|
// turn bytes into bits
|
2020-03-24 01:47:32 +08:00
|
|
|
PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
|
2016-06-18 02:11:48 +08:00
|
|
|
} else {
|
|
|
|
// turn bytes into bits, and count from the other side.
|
2020-03-24 01:47:32 +08:00
|
|
|
PMV.ShiftAmt = Builder.CreateShl(
|
|
|
|
Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
|
2016-06-18 02:11:48 +08:00
|
|
|
}
|
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
|
|
|
|
PMV.Mask = Builder.CreateShl(
|
|
|
|
ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
|
2016-06-18 02:11:48 +08:00
|
|
|
"Mask");
|
2020-03-24 01:47:32 +08:00
|
|
|
PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
|
|
|
|
return PMV;
|
|
|
|
}
|
|
|
|
|
|
|
|
static Value *extractMaskedValue(IRBuilder<> &Builder, Value *WideWord,
|
|
|
|
const PartwordMaskValues &PMV) {
|
|
|
|
assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
|
|
|
|
if (PMV.WordType == PMV.ValueType)
|
|
|
|
return WideWord;
|
2016-06-18 02:11:48 +08:00
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
|
|
|
|
Value *Trunc = Builder.CreateTrunc(Shift, PMV.ValueType, "extracted");
|
|
|
|
return Trunc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static Value *insertMaskedValue(IRBuilder<> &Builder, Value *WideWord,
|
|
|
|
Value *Updated, const PartwordMaskValues &PMV) {
|
|
|
|
assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
|
|
|
|
assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
|
|
|
|
if (PMV.WordType == PMV.ValueType)
|
|
|
|
return Updated;
|
|
|
|
|
|
|
|
Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
|
|
|
|
Value *Shift =
|
|
|
|
Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
|
|
|
|
Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
|
|
|
|
Value *Or = Builder.CreateOr(And, Shift, "inserted");
|
|
|
|
return Or;
|
2016-06-18 02:11:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Emit IR to implement a masked version of a given atomicrmw
|
|
|
|
/// operation. (That is, only the bits under the Mask should be
|
|
|
|
/// affected by the operation)
|
|
|
|
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
|
|
|
|
IRBuilder<> &Builder, Value *Loaded,
|
|
|
|
Value *Shifted_Inc, Value *Inc,
|
|
|
|
const PartwordMaskValues &PMV) {
|
2018-09-19 18:54:22 +08:00
|
|
|
// TODO: update to use
|
|
|
|
// https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
|
|
|
|
// to merge bits from two values without requiring PMV.Inv_Mask.
|
2016-06-18 02:11:48 +08:00
|
|
|
switch (Op) {
|
|
|
|
case AtomicRMWInst::Xchg: {
|
|
|
|
Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
|
|
|
|
Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
|
|
|
|
return FinalVal;
|
|
|
|
}
|
|
|
|
case AtomicRMWInst::Or:
|
|
|
|
case AtomicRMWInst::Xor:
|
2018-08-17 22:03:37 +08:00
|
|
|
case AtomicRMWInst::And:
|
|
|
|
llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
|
2016-06-18 02:11:48 +08:00
|
|
|
case AtomicRMWInst::Add:
|
|
|
|
case AtomicRMWInst::Sub:
|
|
|
|
case AtomicRMWInst::Nand: {
|
|
|
|
// The other arithmetic ops need to be masked into place.
|
|
|
|
Value *NewVal = performAtomicOp(Op, Builder, Loaded, Shifted_Inc);
|
|
|
|
Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
|
|
|
|
Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
|
|
|
|
Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
|
|
|
|
return FinalVal;
|
|
|
|
}
|
|
|
|
case AtomicRMWInst::Max:
|
|
|
|
case AtomicRMWInst::Min:
|
|
|
|
case AtomicRMWInst::UMax:
|
|
|
|
case AtomicRMWInst::UMin: {
|
|
|
|
// Finally, comparison ops will operate on the full value, so
|
|
|
|
// truncate down to the original size, and expand out again after
|
|
|
|
// doing the operation.
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *Loaded_Extract = extractMaskedValue(Builder, Loaded, PMV);
|
|
|
|
Value *NewVal = performAtomicOp(Op, Builder, Loaded_Extract, Inc);
|
|
|
|
Value *FinalVal = insertMaskedValue(Builder, Loaded, NewVal, PMV);
|
2016-06-18 02:11:48 +08:00
|
|
|
return FinalVal;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown atomic op");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Expand a sub-word atomicrmw operation into an appropriate
|
|
|
|
/// word-sized operation.
|
|
|
|
///
|
|
|
|
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
|
|
|
|
/// way as a typical atomicrmw expansion. The only difference here is
|
2020-03-24 01:47:32 +08:00
|
|
|
/// that the operation inside of the loop may operate upon only a
|
2016-06-18 02:11:48 +08:00
|
|
|
/// part of the value.
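///
/// For instance (a sketch only), an 'atomicrmw add i16' on a target whose
/// narrowest cmpxchg is 32 bits is performed by loading the containing
/// aligned i32 word, applying the add to the masked-in half-word, merging it
/// back into the word, and attempting to store the result, retrying on
/// failure.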
|
|
|
|
void AtomicExpand::expandPartwordAtomicRMW(
|
|
|
|
AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
|
|
|
|
AtomicOrdering MemOpOrder = AI->getOrdering();
|
2021-04-06 05:45:33 +08:00
|
|
|
SyncScope::ID SSID = AI->getSyncScopeID();
|
2016-06-18 02:11:48 +08:00
|
|
|
|
|
|
|
IRBuilder<> Builder(AI);
|
|
|
|
|
|
|
|
PartwordMaskValues PMV =
|
|
|
|
createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
|
2021-02-09 12:07:12 +08:00
|
|
|
AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
|
2016-06-18 02:11:48 +08:00
|
|
|
|
|
|
|
Value *ValOperand_Shifted =
|
|
|
|
Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
|
|
|
|
PMV.ShiftAmt, "ValOperand_Shifted");
|
|
|
|
|
|
|
|
auto PerformPartwordOp = [&](IRBuilder<> &Builder, Value *Loaded) {
|
|
|
|
return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
|
|
|
|
ValOperand_Shifted, AI->getValOperand(), PMV);
|
|
|
|
};
|
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *OldResult;
|
|
|
|
if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
|
2021-02-09 12:07:12 +08:00
|
|
|
OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
|
2022-03-17 23:46:12 +08:00
|
|
|
PMV.AlignedAddrAlignment, MemOpOrder, SSID,
|
|
|
|
PerformPartwordOp, createCmpXchgInstFun);
|
2020-03-24 01:47:32 +08:00
|
|
|
} else {
|
|
|
|
assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
|
|
|
|
OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
|
2021-02-09 12:07:12 +08:00
|
|
|
PMV.AlignedAddrAlignment, MemOpOrder,
|
|
|
|
PerformPartwordOp);
|
2020-03-24 01:47:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
|
2016-06-18 02:11:48 +08:00
|
|
|
AI->replaceAllUsesWith(FinalOldResult);
|
|
|
|
AI->eraseFromParent();
|
|
|
|
}
|
|
|
|
|
2018-08-17 22:03:37 +08:00
|
|
|
// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
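//
// As an illustrative sketch (not from a specific test): on a target with a
// 32-bit minimum cmpxchg width,
//   atomicrmw or i8* %p, i8 1 monotonic
// becomes an i32 atomicrmw on the containing aligned word, with the operand
// zero-extended and shifted into place. For 'and', the shifted operand is
// additionally or'd with Inv_Mask so the bytes outside the i8 are preserved.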
|
|
|
|
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
|
|
|
|
IRBuilder<> Builder(AI);
|
|
|
|
AtomicRMWInst::BinOp Op = AI->getOperation();
|
|
|
|
|
|
|
|
assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
|
|
|
|
Op == AtomicRMWInst::And) &&
|
|
|
|
"Unable to widen operation");
|
|
|
|
|
|
|
|
PartwordMaskValues PMV =
|
|
|
|
createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
|
2021-02-09 12:07:12 +08:00
|
|
|
AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
|
2018-08-17 22:03:37 +08:00
|
|
|
|
|
|
|
Value *ValOperand_Shifted =
|
|
|
|
Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
|
|
|
|
PMV.ShiftAmt, "ValOperand_Shifted");
|
|
|
|
|
|
|
|
Value *NewOperand;
|
|
|
|
|
|
|
|
if (Op == AtomicRMWInst::And)
|
|
|
|
NewOperand =
|
|
|
|
Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
|
|
|
|
else
|
|
|
|
NewOperand = ValOperand_Shifted;
|
|
|
|
|
2021-02-09 12:07:12 +08:00
|
|
|
AtomicRMWInst *NewAI =
|
|
|
|
Builder.CreateAtomicRMW(Op, PMV.AlignedAddr, NewOperand,
|
|
|
|
PMV.AlignedAddrAlignment, AI->getOrdering());
|
2018-08-17 22:03:37 +08:00
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
|
2018-08-17 22:03:37 +08:00
|
|
|
AI->replaceAllUsesWith(FinalOldResult);
|
|
|
|
AI->eraseFromParent();
|
|
|
|
return NewAI;
|
|
|
|
}
|
|
|
|
|
2020-07-09 15:36:41 +08:00
|
|
|
bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
|
2016-06-18 02:11:48 +08:00
|
|
|
// The basic idea here is that we're expanding a cmpxchg of a
|
|
|
|
// smaller memory size up to a word-sized cmpxchg. To do this, we
|
|
|
|
// need to add a retry-loop for strong cmpxchg, so that
|
|
|
|
// modifications to other parts of the word don't cause a spurious
|
|
|
|
// failure.
|
|
|
|
|
|
|
|
// This generates code like the following:
|
|
|
|
// [[Setup mask values PMV.*]]
|
|
|
|
// %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
|
|
|
|
// %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
|
|
|
|
// %InitLoaded = load i32* %addr
|
|
|
|
// %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
|
|
|
|
// br partword.cmpxchg.loop
|
|
|
|
// partword.cmpxchg.loop:
|
|
|
|
// %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
|
|
|
|
// [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
|
|
|
|
// %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
|
|
|
|
// %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
|
|
|
|
// %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
|
|
|
|
// i32 %FullWord_NewVal success_ordering failure_ordering
|
|
|
|
// %OldVal = extractvalue { i32, i1 } %NewCI, 0
|
|
|
|
// %Success = extractvalue { i32, i1 } %NewCI, 1
|
|
|
|
// br i1 %Success, label %partword.cmpxchg.end,
|
|
|
|
// label %partword.cmpxchg.failure
|
|
|
|
// partword.cmpxchg.failure:
|
|
|
|
// %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
|
|
|
|
// %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
|
|
|
|
// br i1 %ShouldContinue, label %partword.cmpxchg.loop,
|
|
|
|
// label %partword.cmpxchg.end
|
|
|
|
// partword.cmpxchg.end:
|
|
|
|
// %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
|
|
|
|
// %FinalOldVal = trunc i32 %tmp1 to i8
|
|
|
|
// %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
|
|
|
|
// %Res = insertvalue { i8, i1 } %25, i1 %Success, 1
|
|
|
|
|
|
|
|
Value *Addr = CI->getPointerOperand();
|
|
|
|
Value *Cmp = CI->getCompareOperand();
|
|
|
|
Value *NewVal = CI->getNewValOperand();
|
|
|
|
|
|
|
|
BasicBlock *BB = CI->getParent();
|
|
|
|
Function *F = BB->getParent();
|
|
|
|
IRBuilder<> Builder(CI);
|
|
|
|
LLVMContext &Ctx = Builder.getContext();
|
|
|
|
|
|
|
|
BasicBlock *EndBB =
|
|
|
|
BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
|
|
|
|
auto FailureBB =
|
|
|
|
BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
|
|
|
|
auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);
|
|
|
|
|
|
|
|
// The split call above "helpfully" added a branch at the end of BB
|
|
|
|
// (to the wrong place).
|
|
|
|
std::prev(BB->end())->eraseFromParent();
|
|
|
|
Builder.SetInsertPoint(BB);
|
|
|
|
|
2021-02-09 12:07:12 +08:00
|
|
|
PartwordMaskValues PMV =
|
|
|
|
createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
|
|
|
|
CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
|
2016-06-18 02:11:48 +08:00
|
|
|
|
|
|
|
// Shift the incoming values over, into the right location in the word.
|
|
|
|
Value *NewVal_Shifted =
|
|
|
|
Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
|
|
|
|
Value *Cmp_Shifted =
|
|
|
|
Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);
|
|
|
|
|
|
|
|
// Load the entire current word, and mask into place the expected and new
|
|
|
|
// values
|
|
|
|
LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
|
|
|
|
InitLoaded->setVolatile(CI->isVolatile());
|
|
|
|
Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
|
|
|
|
Builder.CreateBr(LoopBB);
|
|
|
|
|
|
|
|
// partword.cmpxchg.loop:
|
|
|
|
Builder.SetInsertPoint(LoopBB);
|
|
|
|
PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
|
|
|
|
Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);
|
|
|
|
|
|
|
|
// Mask/Or the expected and new values into place in the loaded word.
|
|
|
|
Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
|
|
|
|
Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
|
|
|
|
AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
|
2021-02-09 12:07:12 +08:00
|
|
|
PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
|
|
|
|
CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
|
2016-06-18 02:11:48 +08:00
|
|
|
NewCI->setVolatile(CI->isVolatile());
|
|
|
|
// When we're building a strong cmpxchg, we need a loop, so you
|
|
|
|
// might think we could use a weak cmpxchg inside. But, using strong
|
|
|
|
// allows the below comparison for ShouldContinue, and we're
|
|
|
|
// expecting the underlying cmpxchg to be a machine instruction,
|
|
|
|
// which is strong anyways.
|
|
|
|
NewCI->setWeak(CI->isWeak());
|
|
|
|
|
|
|
|
Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
|
|
|
|
Value *Success = Builder.CreateExtractValue(NewCI, 1);
|
|
|
|
|
|
|
|
if (CI->isWeak())
|
|
|
|
Builder.CreateBr(EndBB);
|
|
|
|
else
|
|
|
|
Builder.CreateCondBr(Success, EndBB, FailureBB);
|
|
|
|
|
|
|
|
// partword.cmpxchg.failure:
|
|
|
|
Builder.SetInsertPoint(FailureBB);
|
|
|
|
// Upon failure, verify that the masked-out part of the loaded value
|
|
|
|
// has been modified. If it didn't, abort the cmpxchg, since the
|
|
|
|
// masked-in part must've.
|
|
|
|
Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
|
|
|
|
Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
|
|
|
|
Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);
|
|
|
|
|
|
|
|
// Add the second value to the phi from above
|
|
|
|
Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);
|
|
|
|
|
|
|
|
// partword.cmpxchg.end:
|
|
|
|
Builder.SetInsertPoint(CI);
|
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
|
2016-06-18 02:11:48 +08:00
|
|
|
Value *Res = UndefValue::get(CI->getType());
|
|
|
|
Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
|
|
|
|
Res = Builder.CreateInsertValue(Res, Success, 1);
|
|
|
|
|
|
|
|
CI->replaceAllUsesWith(Res);
|
|
|
|
CI->eraseFromParent();
|
2020-07-09 15:36:41 +08:00
|
|
|
return true;
|
2016-06-18 02:11:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  IRBuilder<> Builder(I);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}

void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
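  // Rather than building an LL/SC or cmpxchg loop in IR, this path hands the
  // masked, shifted operand to a target-provided intrinsic via
  // emitMaskedAtomicRMWIntrinsic and only extracts the result here.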
|
|
|
|
IRBuilder<> Builder(AI);
|
|
|
|
|
|
|
|
PartwordMaskValues PMV =
|
|
|
|
createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
|
2021-02-09 12:07:12 +08:00
|
|
|
AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
|
2018-09-19 18:54:22 +08:00
|
|
|
|
|
|
|
// The value operand must be sign-extended for signed min/max so that the
|
|
|
|
// target's signed comparison instructions can be used. Otherwise, just
|
|
|
|
// zero-ext.
|
|
|
|
Instruction::CastOps CastOp = Instruction::ZExt;
|
|
|
|
AtomicRMWInst::BinOp RMWOp = AI->getOperation();
|
|
|
|
if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
|
|
|
|
CastOp = Instruction::SExt;
|
|
|
|
|
|
|
|
Value *ValOperand_Shifted = Builder.CreateShl(
|
|
|
|
Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
|
|
|
|
PMV.ShiftAmt, "ValOperand_Shifted");
|
|
|
|
Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
|
|
|
|
Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
|
|
|
|
AI->getOrdering());
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
|
2018-09-19 18:54:22 +08:00
|
|
|
AI->replaceAllUsesWith(FinalOldResult);
|
|
|
|
AI->eraseFromParent();
|
|
|
|
}
|
|
|
|
|
2018-11-30 04:43:42 +08:00
|
|
|
void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
|
|
|
|
IRBuilder<> Builder(CI);
|
|
|
|
|
|
|
|
PartwordMaskValues PMV = createMaskInstrs(
|
|
|
|
Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
|
2021-02-09 12:07:12 +08:00
|
|
|
CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
|
2018-11-30 04:43:42 +08:00
|
|
|
|
|
|
|
Value *CmpVal_Shifted = Builder.CreateShl(
|
|
|
|
Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
|
|
|
|
"CmpVal_Shifted");
|
|
|
|
Value *NewVal_Shifted = Builder.CreateShl(
|
|
|
|
Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
|
|
|
|
"NewVal_Shifted");
|
|
|
|
Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
|
|
|
|
Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
|
2021-05-29 05:05:07 +08:00
|
|
|
CI->getMergedOrdering());
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
|
2018-11-30 04:43:42 +08:00
|
|
|
Value *Res = UndefValue::get(CI->getType());
|
|
|
|
Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
|
|
|
|
Value *Success = Builder.CreateICmpEQ(
|
|
|
|
CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
|
|
|
|
Res = Builder.CreateInsertValue(Res, Success, 1);
|
|
|
|
|
|
|
|
CI->replaceAllUsesWith(Res);
|
|
|
|
CI->eraseFromParent();
|
|
|
|
}
|
|
|
|
|
2016-06-18 02:11:48 +08:00
|
|
|
Value *AtomicExpand::insertRMWLLSCLoop(
|
2021-02-09 12:07:12 +08:00
|
|
|
IRBuilder<> &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
|
2016-06-18 02:11:48 +08:00
|
|
|
AtomicOrdering MemOpOrder,
|
|
|
|
function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
|
|
|
|
LLVMContext &Ctx = Builder.getContext();
|
|
|
|
BasicBlock *BB = Builder.GetInsertBlock();
|
|
|
|
Function *F = BB->getParent();
|
2014-04-03 19:44:58 +08:00
|
|
|
|
2021-02-26 23:33:24 +08:00
|
|
|
assert(AddrAlign >=
|
|
|
|
F->getParent()->getDataLayout().getTypeStoreSize(ResultTy) &&
|
2021-02-09 12:07:12 +08:00
|
|
|
"Expected at least natural alignment at this point.");
|
|
|
|
|
2014-04-03 19:44:58 +08:00
|
|
|
// Given: atomicrmw some_op iN* %addr, iN %incr ordering
|
|
|
|
//
|
|
|
|
// The standard expansion we produce is:
|
|
|
|
// [...]
|
|
|
|
// atomicrmw.start:
|
|
|
|
// %loaded = @load.linked(%addr)
|
|
|
|
// %new = some_op iN %loaded, %incr
|
|
|
|
// %stored = @store_conditional(%new, %addr)
|
|
|
|
// %try_again = icmp i32 ne %stored, 0
|
|
|
|
// br i1 %try_again, label %loop, label %atomicrmw.end
|
|
|
|
// atomicrmw.end:
|
|
|
|
// [...]
|
2016-06-18 02:11:48 +08:00
|
|
|
BasicBlock *ExitBB =
|
|
|
|
BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
|
2022-03-17 23:46:12 +08:00
|
|
|
BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
|
2014-04-03 19:44:58 +08:00
|
|
|
|
|
|
|
// The split call above "helpfully" added a branch at the end of BB (to the
|
2016-06-18 02:11:48 +08:00
|
|
|
// wrong place).
|
2014-04-03 19:44:58 +08:00
|
|
|
std::prev(BB->end())->eraseFromParent();
|
|
|
|
Builder.SetInsertPoint(BB);
|
|
|
|
Builder.CreateBr(LoopBB);
|
|
|
|
|
|
|
|
// Start the main loop block now that we've taken care of the preliminaries.
|
|
|
|
Builder.SetInsertPoint(LoopBB);
|
2021-07-02 23:20:41 +08:00
|
|
|
Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);
|
2014-04-03 19:44:58 +08:00
|
|
|
|
2015-12-03 02:12:57 +08:00
|
|
|
Value *NewVal = PerformOp(Builder, Loaded);
|
2014-04-03 19:44:58 +08:00
|
|
|
|
2014-08-05 05:25:23 +08:00
|
|
|
Value *StoreSuccess =
|
2014-09-04 05:01:03 +08:00
|
|
|
TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
|
2014-04-03 19:44:58 +08:00
|
|
|
Value *TryAgain = Builder.CreateICmpNE(
|
|
|
|
StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
|
|
|
|
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
|
2016-06-18 02:11:48 +08:00
|
|
|
return Loaded;
|
2014-04-03 19:44:58 +08:00
|
|
|
}
|
|
|
|
|
2016-02-19 08:06:41 +08:00
|
|
|
/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
|
|
|
|
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
|
|
|
|
/// IR. As a migration step, we convert back to what used to be the standard
|
|
|
|
/// way to represent a pointer cmpxchg so that we can update backends one by
|
2018-07-31 03:41:25 +08:00
|
|
|
/// one.
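///
/// As an illustration (not taken from a particular test), a pointer cmpxchg
///   cmpxchg i8** %p, i8* %old, i8* %new seq_cst seq_cst
/// is rewritten on a 64-bit target to roughly
///   %old.int = ptrtoint i8* %old to i64
///   %new.int = ptrtoint i8* %new to i64
///   %r = cmpxchg i64* %p.cast, i64 %old.int, i64 %new.int seq_cst seq_cst
/// with the loaded i64 converted back to a pointer via inttoptr afterwards.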
|
2022-03-17 23:46:12 +08:00
|
|
|
AtomicCmpXchgInst *
|
|
|
|
AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
|
2016-02-19 08:06:41 +08:00
|
|
|
auto *M = CI->getModule();
|
|
|
|
Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
|
|
|
|
M->getDataLayout());
|
|
|
|
|
|
|
|
IRBuilder<> Builder(CI);
|
2018-07-31 03:41:25 +08:00
|
|
|
|
2016-02-19 08:06:41 +08:00
|
|
|
Value *Addr = CI->getPointerOperand();
|
2022-03-17 23:46:12 +08:00
|
|
|
Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
|
2016-02-19 08:06:41 +08:00
|
|
|
Value *NewAddr = Builder.CreateBitCast(Addr, PT);
|
|
|
|
|
|
|
|
Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
|
|
|
|
Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);
|
2018-07-31 03:41:25 +08:00
|
|
|
|
2021-02-09 12:07:12 +08:00
|
|
|
auto *NewCI = Builder.CreateAtomicCmpXchg(
|
|
|
|
NewAddr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(),
|
|
|
|
CI->getFailureOrdering(), CI->getSyncScopeID());
|
2016-02-19 08:06:41 +08:00
|
|
|
NewCI->setVolatile(CI->isVolatile());
|
|
|
|
NewCI->setWeak(CI->isWeak());
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
|
2016-02-19 08:06:41 +08:00
|
|
|
|
|
|
|
Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
|
|
|
|
Value *Succ = Builder.CreateExtractValue(NewCI, 1);
|
|
|
|
|
|
|
|
OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());
|
|
|
|
|
|
|
|
Value *Res = UndefValue::get(CI->getType());
|
|
|
|
Res = Builder.CreateInsertValue(Res, OldVal, 0);
|
|
|
|
Res = Builder.CreateInsertValue(Res, Succ, 1);
|
|
|
|
|
|
|
|
CI->replaceAllUsesWith(Res);
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return NewCI;
|
|
|
|
}
|
|
|
|
|
2014-08-22 05:50:01 +08:00
|
|
|
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
|
2014-04-03 21:06:54 +08:00
|
|
|
AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
|
|
|
|
AtomicOrdering FailureOrder = CI->getFailureOrdering();
|
2014-04-03 19:44:58 +08:00
|
|
|
Value *Addr = CI->getPointerOperand();
|
|
|
|
BasicBlock *BB = CI->getParent();
|
|
|
|
Function *F = BB->getParent();
|
|
|
|
LLVMContext &Ctx = F->getContext();
|
2016-03-17 06:12:04 +08:00
|
|
|
// If shouldInsertFencesForAtomic() returns true, then the target does not
|
|
|
|
// want to deal with memory orders, and emitLeading/TrailingFence should take
|
|
|
|
// care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
|
2014-09-04 05:29:59 +08:00
|
|
|
// should preserve the ordering.
|
2016-03-17 06:12:04 +08:00
|
|
|
bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
|
2021-05-29 05:05:07 +08:00
|
|
|
AtomicOrdering MemOpOrder = ShouldInsertFencesForAtomic
|
|
|
|
? AtomicOrdering::Monotonic
|
|
|
|
: CI->getMergedOrdering();
|
2014-04-03 19:44:58 +08:00
|
|
|
|
2016-02-23 04:55:50 +08:00
|
|
|
// In implementations which use a barrier to achieve release semantics, we can
|
|
|
|
// delay emitting this barrier until we know a store is actually going to be
|
|
|
|
// attempted. The cost of this delay is that we need 2 copies of the block
|
|
|
|
// emitting the load-linked, affecting code size.
|
|
|
|
//
|
|
|
|
// Ideally, this logic would be unconditional except for the minsize check
|
|
|
|
// since in other cases the extra blocks naturally collapse down to the
|
|
|
|
// minimal loop. Unfortunately, this puts too much stress on later
|
|
|
|
// optimisations so we avoid emitting the extra logic in those cases too.
|
2016-03-17 06:12:04 +08:00
|
|
|
bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
|
2016-04-07 05:19:33 +08:00
|
|
|
SuccessOrder != AtomicOrdering::Monotonic &&
|
|
|
|
SuccessOrder != AtomicOrdering::Acquire &&
|
2019-04-05 06:40:06 +08:00
|
|
|
!F->hasMinSize();
|
2016-02-23 04:55:50 +08:00
|
|
|
|
|
|
|
// There's no overhead for sinking the release barrier in a weak cmpxchg, so
|
|
|
|
// do it even on minsize.
|
2019-04-05 06:40:06 +08:00
|
|
|
bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();
|
2016-02-23 04:55:50 +08:00
|
|
|
|
2014-04-03 19:44:58 +08:00
|
|
|
// Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
|
|
|
|
//
|
2014-04-03 21:06:54 +08:00
|
|
|
// The full expansion we produce is:
|
2014-04-03 19:44:58 +08:00
|
|
|
// [...]
|
2020-03-24 01:47:32 +08:00
|
|
|
// %aligned.addr = ...
|
2014-04-03 19:44:58 +08:00
|
|
|
// cmpxchg.start:
|
2020-03-24 01:47:32 +08:00
|
|
|
// %unreleasedload = @load.linked(%aligned.addr)
|
|
|
|
// %unreleasedload.extract = extract value from %unreleasedload
|
|
|
|
// %should_store = icmp eq %unreleasedload.extract, %desired
|
|
|
|
// br i1 %should_store, label %cmpxchg.releasingstore,
|
2015-09-23 01:21:44 +08:00
|
|
|
// label %cmpxchg.nostore
|
2016-02-23 04:55:50 +08:00
|
|
|
// cmpxchg.releasingstore:
|
|
|
|
// fence?
|
|
|
|
// br label cmpxchg.trystore
|
2014-04-03 19:44:58 +08:00
|
|
|
// cmpxchg.trystore:
|
2020-03-24 01:47:32 +08:00
|
|
|
// %loaded.trystore = phi [%unreleasedload, %cmpxchg.releasingstore],
|
2016-02-23 04:55:50 +08:00
|
|
|
// [%releasedload, %cmpxchg.releasedload]
|
2020-03-24 01:47:32 +08:00
|
|
|
// %updated.new = insert %new into %loaded.trystore
|
|
|
|
// %stored = @store_conditional(%updated.new, %aligned.addr)
|
2014-06-14 00:45:52 +08:00
|
|
|
// %success = icmp eq i32 %stored, 0
|
2016-02-23 04:55:50 +08:00
|
|
|
// br i1 %success, label %cmpxchg.success,
|
|
|
|
// label %cmpxchg.releasedload/%cmpxchg.failure
|
|
|
|
// cmpxchg.releasedload:
|
2020-03-24 01:47:32 +08:00
|
|
|
// %releasedload = @load.linked(%aligned.addr)
|
|
|
|
// %releasedload.extract = extract value from %releasedload
|
|
|
|
// %should_store = icmp eq %releasedload.extract, %desired
|
2016-02-23 04:55:50 +08:00
|
|
|
// br i1 %should_store, label %cmpxchg.trystore,
|
|
|
|
// label %cmpxchg.failure
|
2014-06-14 00:45:52 +08:00
|
|
|
// cmpxchg.success:
|
|
|
|
// fence?
|
|
|
|
// br label %cmpxchg.end
|
2015-09-23 01:21:44 +08:00
|
|
|
// cmpxchg.nostore:
|
2016-02-23 04:55:50 +08:00
|
|
|
// %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
|
|
|
|
// [%releasedload,
|
|
|
|
// %cmpxchg.releasedload/%cmpxchg.trystore]
|
2015-09-23 01:21:44 +08:00
|
|
|
// @load_linked_fail_balance()?
|
|
|
|
// br label %cmpxchg.failure
|
2014-06-14 00:45:52 +08:00
|
|
|
// cmpxchg.failure:
|
2014-04-03 19:44:58 +08:00
|
|
|
// fence?
|
2014-04-03 21:06:54 +08:00
|
|
|
// br label %cmpxchg.end
|
|
|
|
// cmpxchg.end:
|
2020-03-24 01:47:32 +08:00
|
|
|
// %loaded.exit = phi [%loaded.nostore, %cmpxchg.failure],
|
|
|
|
// [%loaded.trystore, %cmpxchg.trystore]
|
2014-06-14 00:45:52 +08:00
|
|
|
// %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
|
2020-03-24 01:47:32 +08:00
|
|
|
// %loaded = extract value from %loaded.exit
|
2014-06-14 00:45:52 +08:00
|
|
|
// %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
|
|
|
|
// %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
|
2014-04-03 19:44:58 +08:00
|
|
|
// [...]
|
2015-10-10 00:54:49 +08:00
|
|
|
BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
|
2014-06-14 00:45:52 +08:00
|
|
|
auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
|
2015-09-23 01:21:44 +08:00
|
|
|
auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
|
|
|
|
auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
|
2016-02-23 04:55:50 +08:00
|
|
|
auto ReleasedLoadBB =
|
|
|
|
BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
|
|
|
|
auto TryStoreBB =
|
|
|
|
BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
|
|
|
|
auto ReleasingStoreBB =
|
|
|
|
BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
|
|
|
|
auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);
|
2014-04-03 19:44:58 +08:00
|
|
|
|
|
|
|
// This grabs the DebugLoc from CI
|
|
|
|
IRBuilder<> Builder(CI);
|
|
|
|
|
|
|
|
// The split call above "helpfully" added a branch at the end of BB (to the
|
|
|
|
// wrong place), but we might want a fence too. It's easiest to just remove
|
|
|
|
// the branch entirely.
|
|
|
|
std::prev(BB->end())->eraseFromParent();
|
|
|
|
Builder.SetInsertPoint(BB);
|
2016-03-17 06:12:04 +08:00
|
|
|
if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
|
2017-05-09 23:27:17 +08:00
|
|
|
TLI->emitLeadingFence(Builder, CI, SuccessOrder);
|
2020-03-24 01:47:32 +08:00
|
|
|
|
|
|
|
PartwordMaskValues PMV =
|
|
|
|
createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
|
2021-02-09 12:07:12 +08:00
|
|
|
CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
|
2016-02-23 04:55:50 +08:00
|
|
|
Builder.CreateBr(StartBB);
|
2014-04-03 19:44:58 +08:00
|
|
|
|
|
|
|
// Start the main loop block now that we've taken care of the preliminaries.
|
2016-02-23 04:55:50 +08:00
|
|
|
Builder.SetInsertPoint(StartBB);
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *UnreleasedLoad =
|
2021-07-02 23:20:41 +08:00
|
|
|
TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *UnreleasedLoadExtract =
|
|
|
|
extractMaskedValue(Builder, UnreleasedLoad, PMV);
|
2016-02-23 04:55:50 +08:00
|
|
|
Value *ShouldStore = Builder.CreateICmpEQ(
|
2020-03-24 01:47:32 +08:00
|
|
|
UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");
|
2014-04-03 21:06:54 +08:00
|
|
|
|
2015-06-19 09:53:21 +08:00
|
|
|
// If the cmpxchg doesn't actually need any ordering when it fails, we can
|
2014-04-03 21:06:54 +08:00
|
|
|
// jump straight past that fence instruction (if it exists).
|
2016-02-23 04:55:50 +08:00
|
|
|
Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(ReleasingStoreBB);
|
2016-03-17 06:12:04 +08:00
|
|
|
if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
|
2017-05-09 23:27:17 +08:00
|
|
|
TLI->emitLeadingFence(Builder, CI, SuccessOrder);
|
2016-02-23 04:55:50 +08:00
|
|
|
Builder.CreateBr(TryStoreBB);
|
2014-04-03 19:44:58 +08:00
|
|
|
|
|
|
|
Builder.SetInsertPoint(TryStoreBB);
|
2020-03-24 01:47:32 +08:00
|
|
|
PHINode *LoadedTryStore =
|
|
|
|
Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
|
|
|
|
LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
|
|
|
|
Value *NewValueInsert =
|
|
|
|
insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
|
2022-03-17 23:46:12 +08:00
|
|
|
Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
|
|
|
|
PMV.AlignedAddr, MemOpOrder);
|
2014-06-14 00:45:36 +08:00
|
|
|
StoreSuccess = Builder.CreateICmpEQ(
|
2014-04-03 19:44:58 +08:00
|
|
|
StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
|
2016-02-23 04:55:50 +08:00
|
|
|
BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
|
2014-06-14 00:45:52 +08:00
|
|
|
Builder.CreateCondBr(StoreSuccess, SuccessBB,
|
2016-02-23 04:55:50 +08:00
|
|
|
CI->isWeak() ? FailureBB : RetryBB);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(ReleasedLoadBB);
|
|
|
|
Value *SecondLoad;
|
|
|
|
if (HasReleasedLoadBB) {
|
2021-07-02 23:20:41 +08:00
|
|
|
SecondLoad =
|
|
|
|
TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
|
2020-03-24 01:47:32 +08:00
|
|
|
Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
|
|
|
|
ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
|
|
|
|
CI->getCompareOperand(), "should_store");
|
2016-02-23 04:55:50 +08:00
|
|
|
|
|
|
|
// If the cmpxchg doesn't actually need any ordering when it fails, we can
|
|
|
|
// jump straight past that fence instruction (if it exists).
|
|
|
|
Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
|
2020-03-24 01:47:32 +08:00
|
|
|
// Update PHI node in TryStoreBB.
|
|
|
|
LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
|
2016-02-23 04:55:50 +08:00
|
|
|
} else
|
|
|
|
Builder.CreateUnreachable();
|
|
|
|
|
|
|
|
// Make sure later instructions don't get reordered with a fence if
|
|
|
|
// necessary.
|
2014-06-14 00:45:52 +08:00
|
|
|
Builder.SetInsertPoint(SuccessBB);
|
2016-03-17 06:12:04 +08:00
|
|
|
if (ShouldInsertFencesForAtomic)
|
2017-05-09 23:27:17 +08:00
|
|
|
TLI->emitTrailingFence(Builder, CI, SuccessOrder);
|
2014-04-03 21:06:54 +08:00
|
|
|
Builder.CreateBr(ExitBB);
|
2014-04-03 19:44:58 +08:00
|
|
|
|
2015-09-23 01:21:44 +08:00
|
|
|
Builder.SetInsertPoint(NoStoreBB);
|
2020-03-24 01:47:32 +08:00
|
|
|
PHINode *LoadedNoStore =
|
|
|
|
Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
|
|
|
|
LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
|
|
|
|
if (HasReleasedLoadBB)
|
|
|
|
LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);
|
|
|
|
|
2015-09-23 01:21:44 +08:00
|
|
|
// In the failing case, where we don't execute the store-conditional, the
|
|
|
|
// target might want to balance out the load-linked with a dedicated
|
|
|
|
// instruction (e.g., on ARM, clearing the exclusive monitor).
|
|
|
|
TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
|
|
|
|
Builder.CreateBr(FailureBB);
|
|
|
|
|
2014-06-14 00:45:52 +08:00
|
|
|
Builder.SetInsertPoint(FailureBB);
|
2020-03-24 01:47:32 +08:00
|
|
|
PHINode *LoadedFailure =
|
|
|
|
Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
|
|
|
|
LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
|
|
|
|
if (CI->isWeak())
|
|
|
|
LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
|
2016-03-17 06:12:04 +08:00
|
|
|
if (ShouldInsertFencesForAtomic)
|
2017-05-09 23:27:17 +08:00
|
|
|
TLI->emitTrailingFence(Builder, CI, FailureOrder);
|
2014-06-14 00:45:52 +08:00
|
|
|
Builder.CreateBr(ExitBB);
|
|
|
|
|
2014-05-30 18:09:59 +08:00
|
|
|
// Finally, we have control-flow based knowledge of whether the cmpxchg
|
|
|
|
// succeeded or not. We expose this to later passes by converting any
|
2016-02-23 04:55:50 +08:00
|
|
|
// subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
|
|
|
|
// PHI.
|
2014-06-14 00:45:52 +08:00
|
|
|
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
|
2020-03-24 01:47:32 +08:00
|
|
|
PHINode *LoadedExit =
|
|
|
|
Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
|
|
|
|
LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
|
|
|
|
LoadedExit->addIncoming(LoadedFailure, FailureBB);
|
|
|
|
PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
|
2014-06-14 00:45:52 +08:00
|
|
|
Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);
|
2014-05-30 18:09:59 +08:00
|
|
|
|
2020-03-24 01:47:32 +08:00
|
|
|
// This is the "exit value" from the cmpxchg expansion. It may be of
|
|
|
|
// a type wider than the one in the cmpxchg instruction.
|
|
|
|
Value *LoadedFull = LoadedExit;
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
|
|
|
|
Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);
|
2016-02-23 04:55:50 +08:00
|
|
|
|
2014-05-30 18:09:59 +08:00
|
|
|
// Look for any users of the cmpxchg that are just comparing the loaded value
|
|
|
|
// against the desired one, and replace them with the CFG-derived version.
SmallVector<ExtractValueInst *, 2> PrunedInsts;
|
2014-05-30 18:09:59 +08:00
|
|
|
for (auto User : CI->users()) {
ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
|
|
|
|
if (!EV)
|
2014-05-30 18:09:59 +08:00
|
|
|
continue;
assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
|
|
|
|
"weird extraction from { iN, i1 }");
|
2014-05-30 18:09:59 +08:00
|
|
|
|
IR: add "cmpxchg weak" variant to support permitted failure.
This commit adds a weak variant of the cmpxchg operation, as described
in C++11. A cmpxchg instruction with this modifier is permitted to
fail to store, even if the comparison indicated it should.
As a result, cmpxchg instructions must return a flag indicating
success in addition to their original iN value loaded. Thus, for
uniformity *all* cmpxchg instructions now return "{ iN, i1 }". The
second flag is 1 when the store succeeded.
At the DAG level, a new ATOMIC_CMP_SWAP_WITH_SUCCESS node has been
added as the natural representation for the new cmpxchg instructions.
It is a strong cmpxchg.
By default this gets Expanded to the existing ATOMIC_CMP_SWAP during
Legalization, so existing backends should see no change in behaviour.
If they wish to deal with the enhanced node instead, they can call
setOperationAction on it. Beware: as a node with 2 results, it cannot
be selected from TableGen.
Currently, no use is made of the extra information provided in this
patch. Test updates are almost entirely adapting the input IR to the
new scheme.
Summary for out of tree users:
------------------------------
+ Legacy Bitcode files are upgraded during read.
+ Legacy assembly IR files will be invalid.
+ Front-ends must adapt to different type for "cmpxchg".
+ Backends should be unaffected by default.
llvm-svn: 210903
2014-06-13 22:24:07 +08:00
|
|
|
if (EV->getIndices()[0] == 0)
|
|
|
|
EV->replaceAllUsesWith(Loaded);
|
|
|
|
else
|
|
|
|
EV->replaceAllUsesWith(Success);
|
|
|
|
|
|
|
|
PrunedInsts.push_back(EV);
|
2014-05-30 18:09:59 +08:00
|
|
|
}
|
|
|
|
|
IR: add "cmpxchg weak" variant to support permitted failure.
This commit adds a weak variant of the cmpxchg operation, as described
in C++11. A cmpxchg instruction with this modifier is permitted to
fail to store, even if the comparison indicated it should.
As a result, cmpxchg instructions must return a flag indicating
success in addition to their original iN value loaded. Thus, for
uniformity *all* cmpxchg instructions now return "{ iN, i1 }". The
second flag is 1 when the store succeeded.
At the DAG level, a new ATOMIC_CMP_SWAP_WITH_SUCCESS node has been
added as the natural representation for the new cmpxchg instructions.
It is a strong cmpxchg.
By default this gets Expanded to the existing ATOMIC_CMP_SWAP during
Legalization, so existing backends should see no change in behaviour.
If they wish to deal with the enhanced node instead, they can call
setOperationAction on it. Beware: as a node with 2 results, it cannot
be selected from TableGen.
Currently, no use is made of the extra information provided in this
patch. Test updates are almost entirely adapting the input IR to the
new scheme.
Summary for out of tree users:
------------------------------
+ Legacy Bitcode files are upgraded during read.
+ Legacy assembly IR files will be invalid.
+ Front-ends must adapt to different type for "cmpxchg".
+ Backends should be unaffected by default.
llvm-svn: 210903
2014-06-13 22:24:07 +08:00
|
|
|
// We can remove the instructions now we're no longer iterating through them.
|
|
|
|
for (auto EV : PrunedInsts)
|
|
|
|
EV->eraseFromParent();
|
2014-04-03 19:44:58 +08:00
|
|
|
|
IR: add "cmpxchg weak" variant to support permitted failure.
This commit adds a weak variant of the cmpxchg operation, as described
in C++11. A cmpxchg instruction with this modifier is permitted to
fail to store, even if the comparison indicated it should.
As a result, cmpxchg instructions must return a flag indicating
success in addition to their original iN value loaded. Thus, for
uniformity *all* cmpxchg instructions now return "{ iN, i1 }". The
second flag is 1 when the store succeeded.
At the DAG level, a new ATOMIC_CMP_SWAP_WITH_SUCCESS node has been
added as the natural representation for the new cmpxchg instructions.
It is a strong cmpxchg.
By default this gets Expanded to the existing ATOMIC_CMP_SWAP during
Legalization, so existing backends should see no change in behaviour.
If they wish to deal with the enhanced node instead, they can call
setOperationAction on it. Beware: as a node with 2 results, it cannot
be selected from TableGen.
Currently, no use is made of the extra information provided in this
patch. Test updates are almost entirely adapting the input IR to the
new scheme.
Summary for out of tree users:
------------------------------
+ Legacy Bitcode files are upgraded during read.
+ Legacy assembly IR files will be invalid.
+ Front-ends must adapt to different type for "cmpxchg".
+ Backends should be unaffected by default.
llvm-svn: 210903
2014-06-13 22:24:07 +08:00
|
|
|
if (!CI->use_empty()) {
|
|
|
|
// Some use of the full struct return that we don't understand has happened,
|
|
|
|
// so we've got to reconstruct it properly.
|
|
|
|
Value *Res;
|
|
|
|
Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
|
|
|
|
Res = Builder.CreateInsertValue(Res, Success, 1);
|
|
|
|
|
|
|
|
CI->replaceAllUsesWith(Res);
|
|
|
|
}
|
|
|
|
|
|
|
|
CI->eraseFromParent();
|
2014-04-03 19:44:58 +08:00
|
|
|
return true;
|
|
|
|
}
|
2014-09-26 01:27:43 +08:00
|
|
|
|
2022-03-17 23:46:12 +08:00
|
|
|
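// An atomicrmw is "idempotent" when it always stores back the value it
// loaded, so only its memory-ordering effect matters. Illustrative IR forms
// that the check below recognizes:
//   atomicrmw add iN* %addr, iN 0 <ordering>   ; x + 0 == x
//   atomicrmw or  iN* %addr, iN 0 <ordering>   ; x | 0 == x
//   atomicrmw and iN* %addr, iN -1 <ordering>  ; x & -1 == x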
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

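// Give the target a chance to replace an idempotent RMW with a fence plus an
// ordinary atomic load (via lowerIdempotentRMWIntoFencedLoad). If it does, the
// resulting load may itself still need expansion, so it is fed back through
// tryExpandAtomicLoad.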
bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

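// Emit the generic "load; compute; cmpxchg; retry" loop used to implement an
// atomicrmw in terms of compare-and-swap. PerformOp computes the new value
// from the previously loaded one, and CreateCmpXchg abstracts how the
// compare-exchange itself is emitted (a native cmpxchg instruction or, on the
// libcall path below, a call to __atomic_compare_exchange).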
Value *AtomicExpand::insertRMWCmpXchgLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                SSID, Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return NewLoaded;
}

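// Dispatch a cmpxchg to the expansion strategy requested by the target. Note
// that even when the target asks for no expansion, a cmpxchg narrower than
// the minimum supported width still gets the partword treatment.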
bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    return expandAtomicCmpXchg(CI);
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  }
}

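// A caller supplies a CreateCmpXchgInstFun describing how to emit the
// compare-exchange. A minimal, illustrative callback (mirroring the one used
// by expandAtomicRMWToLibcall below) might look like:
//   [](IRBuilder<> &Builder, Value *Addr, Value *Loaded, Value *NewVal,
//      Align Alignment, AtomicOrdering MemOpOrder, SyncScope::ID SSID,
//      Value *&Success, Value *&NewLoaded) {
//     auto *Pair = Builder.CreateAtomicCmpXchg(
//         Addr, Loaded, NewVal, Alignment, MemOpOrder,
//         AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
//     Success = Builder.CreateExtractValue(Pair, 1, "success");
//     NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
//   }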
// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  IRBuilder<> Builder(AI);
  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(), AI->getSyncScopeID(),
      [&](IRBuilder<> &Builder, Value *Loaded) {
        return performAtomicOp(AI->getOperation(), Builder, Loaded,
                               AI->getValOperand());
      },
      CreateCmpXchg);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}

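// Everything below implements the fallback lowering of atomic operations that
// the target cannot (or chooses not to) handle natively: they are rewritten
// as calls into the __atomic_* runtime library.
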
// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist in C on the target (otherwise, the
// function wouldn't actually be defined.)
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // TODO: "LargestSize" is an approximation for "largest type that
  // you can express in C". It seems to be the case that int128 is
  // supported on all 64-bit platforms, otherwise only up to 64-bit
  // integers are supported. If we get this wrong, then we'll try to
  // call a sized libcall that doesn't actually exist. There should
  // really be some more reliable way in LLVM of determining integer
  // sizes which are valid in the target's C ABI...
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}

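// For example (illustrative): a naturally aligned 4-byte operation satisfies
// the check above and can use __atomic_fetch_add_4 and friends, whereas a
// 4-byte operation only known to be 2-byte aligned, or any 3-byte operation,
// must fall back to the generic, size_t-taking __atomic_* calls. A 16-byte
// operation additionally requires a target whose largest legal integer type
// is at least 64 bits.
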
void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
}

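// In each Libcalls table passed to expandAtomicOpToLibcall, element 0 is the
// generic (size_t-taking) libcall and elements 1-5 are the 1/2/4/8/16-byte
// specializations; expandAtomicOpToLibcall selects among them based on
// canUseSizedAtomicCall.
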
void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
}

void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for CAS");
}

static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::BAD_BINOP:
    llvm_unreachable("Should not have BAD_BINOP.");
  case AtomicRMWInst::Xchg:
    return makeArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return makeArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return makeArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return makeArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return makeArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return makeArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return makeArrayRef(LibcallsNand);
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
    // No atomic libcalls are available for max/min/umax/umin or for the
    // floating-point operations.
    return {};
  }
  llvm_unreachable("Unexpected AtomicRMW operation.");
}

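// Lower an atomicrmw to a libcall. Operations with no __atomic_fetch_*
// equivalent (min/max/umin/umax and the floating-point RMWs) get an empty
// table from GetRMWLibcall and are instead rewritten below as a CAS loop
// whose compare-exchange is itself a __atomic_compare_exchange libcall.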
void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());

  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for
  // the operation (min/max), or there were only size-specialized
  // libcalls (add/sub/etc) and we needed a generic. So, expand to a
  // CAS libcall, via a CAS loop, instead.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this](IRBuilder<> &Builder, Value *Addr, Value *Loaded,
                  Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                  SyncScope::ID SSID, Value *&Success, Value *&NewLoaded) {
          // Create the CAS instruction normally...
          AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
              Addr, Loaded, NewVal, Alignment, MemOpOrder,
              AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
          Success = Builder.CreateExtractValue(Pair, 1, "success");
          NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

          // ...and then expand the CAS into a libcall.
          expandAtomicCASToLibcall(Pair);
        });
  }
}

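// For instance (illustrative), an 'atomicrmw max' on a target with no native
// support becomes a loop that computes the maximum of the loaded value and
// the operand, then retries via a __atomic_compare_exchange* call until the
// exchange succeeds.
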
// A helper routine for the above expandAtomic*ToLibcall functions.
//
// 'Libcalls' contains an array of enum values for the particular
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller. Depending on the particular call, some will be null.
bool AtomicExpand::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32. So
  // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
  ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }

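  // Note: toCABI maps the IR ordering onto the integer encoding of the C/C++
  // memory_order enum that the __atomic_* library expects (in the usual ABI,
  // relaxed=0 ... seq_cst=5).
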
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);

  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this
    // operation, so give up.
    return false;
  }

  if (!TLI->getLibcallName(RTLibType)) {
    // This target does not implement the requested atomic libcall so give up.
    return false;
  }

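  // A 'false' return means no suitable libcall exists. Callers react
  // differently: the load/store/cas wrappers above treat that as a fatal
  // error, while expandAtomicRMWToLibcall falls back to a CAS loop.
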
// Build up the function call. There's two kinds. First, the sized
|
|
|
|
// variants. These calls are going to be one of the following (with
|
|
|
|
// N=1,2,4,8,16):
|
|
|
|
// iN __atomic_load_N(iN *ptr, int ordering)
|
|
|
|
// void __atomic_store_N(iN *ptr, iN val, int ordering)
|
|
|
|
// iN __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
|
|
|
|
// bool __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
|
|
|
|
// int success_order, int failure_order)
|
|
|
|
//
|
|
|
|
// Note that these functions can be used for non-integer atomic
|
|
|
|
// operations, the values just need to be bitcast to integers on the
|
|
|
|
// way in and out.
|
|
|
|
//
|
|
|
|
// And, then, the generic variants. They look like the following:
|
|
|
|
// void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
|
|
|
|
// void __atomic_store(size_t size, void *ptr, void *val, int ordering)
|
|
|
|
// void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
|
|
|
|
// int ordering)
|
|
|
|
// bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
|
|
|
|
// void *desired, int success_order,
|
|
|
|
// int failure_order)
|
|
|
|
//
|
|
|
|
// The different signatures are built up depending on the
|
|
|
|
// 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
|
|
|
|
// variables.
|
|
|
|
|
|
|
|
AllocaInst *AllocaCASExpected = nullptr;
|
|
|
|
Value *AllocaCASExpected_i8 = nullptr;
|
|
|
|
AllocaInst *AllocaValue = nullptr;
|
|
|
|
Value *AllocaValue_i8 = nullptr;
|
|
|
|
AllocaInst *AllocaResult = nullptr;
|
|
|
|
Value *AllocaResult_i8 = nullptr;
|
|
|
|
|
|
|
|
Type *ResultTy;
|
|
|
|
SmallVector<Value *, 6> Args;
|
Rename AttributeSet to AttributeList
Summary:
This class is a list of AttributeSetNodes corresponding the function
prototype of a call or function declaration. This class used to be
called ParamAttrListPtr, then AttrListPtr, then AttributeSet. It is
typically accessed by parameter and return value index, so
"AttributeList" seems like a more intuitive name.
Rename AttributeSetImpl to AttributeListImpl to follow suit.
It's useful to rename this class so that we can rename AttributeSetNode
to AttributeSet later. AttributeSet is the set of attributes that apply
to a single function, argument, or return value.
Reviewers: sanjoy, javed.absar, chandlerc, pete
Reviewed By: pete
Subscribers: pete, jholewinski, arsenm, dschuff, mehdi_amini, jfb, nhaehnle, sbc100, void, llvm-commits
Differential Revision: https://reviews.llvm.org/D31102
llvm-svn: 298393
2017-03-22 00:57:19 +08:00
|
|
|
AttributeList Attr;
|
Add __atomic_* lowering to AtomicExpandPass.
(Recommit of r266002, with r266011, r266016, and not accidentally
including an extra unused/uninitialized element in LibcallRoutineNames)
AtomicExpandPass can now lower atomic load, atomic store, atomicrmw, and
cmpxchg instructions to __atomic_* library calls, when the target
doesn't support atomics of a given size.
This is the first step towards moving all atomic lowering from clang
into llvm. When all is done, the behavior of __sync_* builtins,
__atomic_* builtins, and C11 atomics will be unified.
Previously LLVM would pass everything through to the ISelLowering
code. There, unsupported atomic instructions would turn into __sync_*
library calls. Because of that behavior, Clang currently avoids emitting
llvm IR atomic instructions when this would happen, and emits __atomic_*
library functions itself, in the frontend.
This change makes LLVM able to emit __atomic_* libcalls, and thus will
eventually allow clang to depend on LLVM to do the right thing.
It is advantageous to do the new lowering to atomic libcalls in
AtomicExpandPass, before ISel time, because it's important that all
atomic operations for a given size either lower to __atomic_*
libcalls (which may use locks), or native instructions which won't. No
mixing and matching.
At the moment, this code is enabled only for SPARC, as a
demonstration. The next commit will expand support to all of the other
targets.
Differential Revision: http://reviews.llvm.org/D18200
llvm-svn: 266115
2016-04-13 04:18:48 +08:00
|
|
|
|
|
|
|
// 'size' argument.
|
|
|
|
if (!UseSizedLibcall) {
|
|
|
|
// Note, getIntPtrType is assumed equivalent to size_t.
|
|
|
|
Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
|
|
|
|
}

  // 'ptr' argument.
  // note: This assumes all address spaces share a common libfunc
  // implementation and that addresses are convertible. For systems without
  // that property, we'd need to extend this mechanism to support AS-specific
  // families of atomic intrinsics.
  auto PtrTypeAS = PointerOperand->getType()->getPointerAddressSpace();
  Value *PtrVal =
      Builder.CreateBitCast(PointerOperand, Type::getInt8PtrTy(Ctx, PtrTypeAS));
  PtrVal = Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
  Args.push_back(PtrVal);
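
  // Illustrative note (the exact IR depends on the target's data layout): a
  // pointer in a non-zero address space, e.g. i32 addrspace(1)* %p, is first
  // bitcast to i8 addrspace(1)* and then addrspacecast to the default i8*
  // the libcall expects, so a single libcall symbol serves all address spaces.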

  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();

    AllocaCASExpected_i8 = Builder.CreateBitCast(
        AllocaCASExpected, Type::getInt8PtrTy(Ctx, AllocaAS));
    Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
    Args.push_back(AllocaCASExpected_i8);
  }
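
  // Hedged note on the interface assumed here: the generic
  // __atomic_compare_exchange libcall takes 'expected' by pointer and, on
  // failure, writes the value it actually observed back through that pointer.
  // That is why 'expected' is spilled to a temporary alloca here and reloaded
  // after the call when the cmpxchg result struct is rebuilt below.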

  // 'val' argument ('desired' for cas), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      AllocaValue_i8 =
          Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
      Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue_i8);
    }
  }
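
  // Roughly speaking (an assumption about the libcall ABI, not checked here):
  // sized calls pass the operand by value as an integer of the access width,
  // so pointer or float operands are bit-or-pointer-cast to SizedIntTy, while
  // the generic calls take it indirectly through the temporary alloca above.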

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
    AllocaResult_i8 =
        Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
    Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
    Args.push_back(AllocaResult_i8);
  }
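
  // Note (assumed libcall shape): the generic variants return their result
  // through this out-pointer rather than by value, so the slot is reloaded
  // after the call; sized variants return the value directly and skip the
  // alloca entirely.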

  // 'ordering' ('success_order' for cas) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);
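
  // OrderingVal/Ordering2Val are assumed to be i32 constants in the usual
  // __ATOMIC_* C ABI encoding (e.g. 0 = relaxed, 2 = acquire, 3 = release,
  // 4 = acq_rel, 5 = seq_cst), which is what libatomic-style runtimes expect.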

  // Now, the return type.
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);
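
  // Summary of the choices above: cmpxchg maps to a zero-extended i1 success
  // flag, sized calls that produce a value return an integer of the access
  // width, and everything else (stores, generic calls using the out-pointer)
  // is declared void.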

  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn =
      M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;
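
  // Illustrative only, assuming a target without native 16-byte atomics and
  // the generic libcall names from the RTLIB tables: on a 64-bit target an
  // unsupported seq_cst 16-byte atomic load would end up as roughly
  //   call void @__atomic_load(i64 16, i8* %obj.i8, i8* %ret.i8, i32 5)
  // with the actual result reloaded from the 'ret' alloca below.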

  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool result
    // from call}
    Type *FinalResultTy = I->getType();
    Value *V = UndefValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
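    // (For reference: an IR cmpxchg yields a { ValueType, i1 } struct, so the
    // replacement value is reassembled from the reloaded 'expected' slot and
    // the libcall's boolean return.)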
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
                                    AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}