Revert r55018 and apply the correct "fix" for the 64-bit sub_and_fetch atomic.
Just expand it like the other X-bit sub_and_fetches. llvm-svn: 55023
This commit is contained in: commit f00f3055d8 (parent d5834e90dc)
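Note on the approach (editorial, not part of the original commit text): given the commit message, the atomic_load_sub_* PatFrags and the LXSUB* instruction patterns in the hunks below are the r55018 lines being reverted, while the ISD::ATOMIC_LOAD_SUB Expand actions in the X86TargetLowering constructor are the lines being added. Marking the node as Expand lets the DAG legalizer lower an atomic subtract generically, presumably as an atomic add of the negated operand, so the existing LXADD patterns keep doing the work. Below is a minimal, self-contained C++ sketch of that equivalence; the helper names are invented for illustration and nothing in it is LLVM code.

// Standalone sketch, not part of this commit: an atomic subtract can be
// performed with the existing atomic-add primitive by negating the operand,
// which is why no separate lock-xadd-based LXSUB pattern is needed.
#include <atomic>
#include <cassert>
#include <cstdint>

// What __sync_sub_and_fetch computes, written with an atomic subtract.
uint64_t sub_and_fetch_direct(std::atomic<uint64_t> &loc, uint64_t dec) {
  return loc.fetch_sub(dec) - dec;
}

// The "expanded" form: reuse atomic add with the two's-complement negation
// of the operand (unsigned wraparound keeps this well defined).
uint64_t sub_and_fetch_expanded(std::atomic<uint64_t> &loc, uint64_t dec) {
  return loc.fetch_add(0 - dec) - dec;
}

int main() {
  std::atomic<uint64_t> a{100}, b{100};
  uint64_t ra = sub_and_fetch_direct(a, 7);    // 93
  uint64_t rb = sub_and_fetch_expanded(b, 7);  // 93
  assert(ra == 93 && rb == 93);
  assert(a.load() == b.load());
  return 0;
}

On x86 both helpers boil down to the same kind of lock xadd read-modify-write, which is essentially why a dedicated subtract pattern buys nothing over expanding to add.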
@@ -358,10 +358,10 @@ def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
@@ -815,32 +815,6 @@ def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
  return false;
}]>;

def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$dec),
                        (atomic_load_sub node:$ptr, node:$dec), [{
  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
    return V->getValueType(0) == MVT::i8;
  return false;
}]>;
def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$dec),
                        (atomic_load_sub node:$ptr, node:$dec), [{
  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
    return V->getValueType(0) == MVT::i16;
  return false;
}]>;
def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$dec),
                        (atomic_load_sub node:$ptr, node:$dec), [{
  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
    return V->getValueType(0) == MVT::i32;
  return false;
}]>;
def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$dec),
                        (atomic_load_sub node:$ptr, node:$dec), [{
  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
    return V->getValueType(0) == MVT::i64;
  return false;
}]>;


def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
                    (atomic_swap node:$ptr, node:$inc), [{
  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
@@ -867,6 +841,7 @@ def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
}]>;


// setcc convenience fragments.
def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETOEQ)>;
@@ -297,9 +297,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);

  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i8, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i16, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i64, Expand);

  // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
@@ -1143,13 +1143,6 @@ def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                 "lock\n\txadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;

let Defs = [EFLAGS] in
def LXSUB64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                 "lock\n\txadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_sub_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;

def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                  "xchg\t$val, $ptr",
                  [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
@@ -2634,22 +2634,6 @@ def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
               TB, LOCK;
}

// Atomic exchange and subtract
let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXSUB32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                "lock\n\txadd{l}\t{$val, $ptr|$ptr, $val}",
                [(set GR32:$dst, (atomic_load_sub_32 addr:$ptr, GR32:$val))]>,
                TB, LOCK;
def LXSUB16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                "lock\n\txadd{w}\t{$val, $ptr|$ptr, $val}",
                [(set GR16:$dst, (atomic_load_sub_16 addr:$ptr, GR16:$val))]>,
                TB, OpSize, LOCK;
def LXSUB8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                "lock\n\txadd{b}\t{$val, $ptr|$ptr, $val}",
                [(set GR8:$dst, (atomic_load_sub_8 addr:$ptr, GR8:$val))]>,
                TB, LOCK;
}

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomDAGSchedInserter = 1 in {