[aarch64][globalisel] Legalize G_ATOMIC_CMPXCHG_WITH_SUCCESS and G_ATOMICRMW_*

G_ATOMICRMW_* is generally legal on AArch64. The exception is G_ATOMICRMW_NAND.

G_ATOMIC_CMPXCHG_WITH_SUCCESS needs to be lowered to G_ATOMIC_CMPXCHG with an
external comparison.
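
In builder terms the lowering is roughly the sketch below (register names
OldVal/Success/Addr/Cmp/New and the MMO are illustrative; the actual code is
in the LegalizerHelper hunk further down):

  // OldVal, Success = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, Cmp, New
  // becomes a plain cmpxchg plus an external equality check:
  MIRBuilder.buildAtomicCmpXchg(OldVal, Addr, Cmp, New, MMO);
  MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Success, OldVal, Cmp);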

Note that IRTranslator doesn't generate these instructions yet.

llvm-svn: 319466
Daniel Sanders 2017-11-30 20:11:42 +00:00
parent d78d65c2a4
commit aef1dfc690
7 changed files with 130 additions and 1 deletion


@@ -734,6 +734,24 @@ public:
  /// \return The newly created instruction.
  MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
                                                unsigned Idx);

  /// Build and insert `OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
  /// MMO`.
  ///
  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
  /// \p CmpVal; otherwise leave it unchanged. Puts the original value from
  /// \p Addr in \p OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register of scalar type.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
  ///      registers of the same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
                                         unsigned CmpVal, unsigned NewVal,
                                         MachineMemOperand &MMO);
};

} // End namespace llvm.
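
As a usage illustration (not part of the patch — MF, MRI, MIRBuilder, and the
Addr/Cmp/New registers are assumed to already exist in the caller), emitting a
32-bit cmpxchg through this builder might look like:

  // Describe an atomic 4-byte load+store; the ordering and sync-scope
  // arguments of getMachineMemOperand are left at their defaults for brevity.
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore, 4 /*Size*/,
      4 /*Align*/);
  unsigned OldVal = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MIRBuilder.buildAtomicCmpXchg(OldVal, Addr, Cmp, New, *MMO);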


@@ -265,6 +265,9 @@ HANDLE_TARGET_OPCODE(G_LOAD)
/// Generic store.
HANDLE_TARGET_OPCODE(G_STORE)
/// Generic atomic cmpxchg with internal success check.
HANDLE_TARGET_OPCODE(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
/// Generic atomic cmpxchg.
HANDLE_TARGET_OPCODE(G_ATOMIC_CMPXCHG)


@@ -482,6 +482,16 @@ def G_STORE : Instruction {
  let mayStore = 1;
}

// Generic atomic cmpxchg with internal success check. Expects a
// MachineMemOperand in addition to explicit operands.
def G_ATOMIC_CMPXCHG_WITH_SUCCESS : Instruction {
  let OutOperandList = (outs type0:$oldval, type1:$success);
  let InOperandList = (ins type2:$addr, type0:$cmpval, type0:$newval);
  let hasSideEffects = 0;
  let mayLoad = 1;
  let mayStore = 1;
}

// Generic atomic cmpxchg. Expects a MachineMemOperand in addition to explicit
// operands.
def G_ATOMIC_CMPXCHG : Instruction {
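
To make the operand list concrete: type0 binds the old/compare/new values,
type1 the success flag, and type2 the address. A hedged sketch (assumed
register names, not from the patch) of building this form with the generic
buildInstr API:

  // Assumed: OldVal is s32, Success is s1, Addr is p0, and MMO already exists.
  MIRBuilder.buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldVal)      // type0:$oldval
      .addDef(Success)     // type1:$success
      .addUse(Addr)        // type2:$addr
      .addUse(Cmp)         // type0:$cmpval
      .addUse(New)         // type0:$newval
      .addMemOperand(MMO); // the required MachineMemOperand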


@@ -868,6 +868,18 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
    MI.eraseFromParent();
    return Legalized;
  }
  case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
    unsigned OldValRes = MI.getOperand(0).getReg();
    unsigned SuccessRes = MI.getOperand(1).getReg();
    unsigned Addr = MI.getOperand(2).getReg();
    unsigned CmpVal = MI.getOperand(3).getReg();
    unsigned NewVal = MI.getOperand(4).getReg();
    // Emit the plain cmpxchg, reusing the original memory operand, then
    // recompute the success flag by comparing the loaded value against the
    // expected one.
    MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
                                  **MI.memoperands_begin());
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
    MI.eraseFromParent();
    return Legalized;
  }
  }
}


@@ -658,6 +658,31 @@ MachineInstrBuilder MachineIRBuilder::buildExtractVectorElement(unsigned Res,
      .addUse(Idx);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
                                     unsigned CmpVal, unsigned NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = MRI->getType(OldValRes);
  LLT AddrTy = MRI->getType(Addr);
  LLT CmpValTy = MRI->getType(CmpVal);
  LLT NewValTy = MRI->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

void MachineIRBuilder::validateTruncExt(unsigned Dst, unsigned Src,
                                        bool IsExtend) {
#ifndef NDEBUG


@@ -351,8 +351,10 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
    setAction({G_VAARG, Ty}, Custom);

  if (ST.hasLSE()) {
    for (auto Ty : {s8, s16, s32, s64}) {
      setAction({G_ATOMIC_CMPXCHG_WITH_SUCCESS, Ty}, Lower);
      setAction({G_ATOMIC_CMPXCHG, Ty}, Legal);
    }
    setAction({G_ATOMIC_CMPXCHG, 1, p0}, Legal);

    for (unsigned Op :
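
The hunk is cut off above; per the commit message, the continuation marks the
G_ATOMICRMW_* operations (all except G_ATOMICRMW_NAND) Legal. A hedged sketch
of what such rules look like — the exact opcode list and placement in the
patch may differ:

  // Sketch, not the patch's verbatim code: each LSE-backed read-modify-write
  // opcode is marked legal for the scalar types AArch64 supports.
  for (unsigned Op : {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
                      G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
                      G_ATOMICRMW_MIN, G_ATOMICRMW_MAX, G_ATOMICRMW_UMIN,
                      G_ATOMICRMW_UMAX})
    for (auto Ty : {s8, s16, s32, s64})
      setAction({Op, Ty}, Legal);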


@@ -0,0 +1,59 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-- -mattr=+lse -run-pass=legalizer -verify-machineinstrs -global-isel %s -o - | FileCheck %s
--- |
  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"

  define void @cmpxchg_i32(i64* %addr) { ret void }
  define void @cmpxchg_i64(i64* %addr) { ret void }
...
---
name: cmpxchg_i32
body: |
  bb.0:
    liveins: %x0

    ; CHECK-LABEL: name: cmpxchg_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
    ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
    ; CHECK: [[SRES:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[RES]](s32), [[CMP]]
    ; CHECK: [[SRES32:%[0-9]+]]:_(s32) = COPY [[SRES]]
    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[RES]], [[SRES32]]
    ; CHECK: %w0 = COPY [[MUL]]
    %0:_(p0) = COPY %x0
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(s32) = G_CONSTANT i32 1
    %3:_(s32), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
    %5:_(s32) = G_ANYEXT %4
    %6:_(s32) = G_MUL %3, %5
    %w0 = COPY %6(s32)
...
---
name: cmpxchg_i64
body: |
  bb.0:
    liveins: %x0

    ; CHECK-LABEL: name: cmpxchg_i64
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
    ; CHECK: [[CMP:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
    ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
    ; CHECK: [[SRES:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[RES]](s64), [[CMP]]
    ; CHECK: [[SRES64:%[0-9]+]]:_(s64) = G_ANYEXT [[SRES]]
    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[RES]], [[SRES64]]
    ; CHECK: %x0 = COPY [[MUL]]
    %0:_(p0) = COPY %x0
    %1:_(s64) = G_CONSTANT i64 0
    %2:_(s64) = G_CONSTANT i64 1
    %3:_(s64), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
    %5:_(s64) = G_ANYEXT %4
    %6:_(s64) = G_MUL %3, %5
    %x0 = COPY %6(s64)
...