[X86] Use OR32mi8Locked instead of LOCK_OR32mi8 in emitLockedStackOp.
They encode the same way, but OR32mi8Locked has hasUnmodeledSideEffects set, which should be stronger than the mayLoad/mayStore flags on LOCK_OR32mi8. I think this makes sense since we are using the instruction as a fence. This also seems to hide the operation from the speculative load hardening pass, so I've reverted r360511.

llvm-svn: 360747
commit 384d46c0d5
parent a23cc727d8
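For context, a minimal sketch (not code from this commit; the helper name below is invented) of how a MachineFunction pass can tell a plain load apart from an instruction with unmodeled side effects using the MachineInstr query API. This is why switching the opcode's flags can hide the locked OR from a pass that only wants to touch ordinary loads:

// Hedged illustration only; not the actual speculative-load-hardening logic.
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Hypothetical helper: treat an instruction as a hardenable load only if it
// is a plain load without unmodeled side effects. An opcode like
// OR32mi8Locked (side effects set) fails this check, while one that is only
// marked mayLoad/mayStore, like LOCK_OR32mi8, would pass it.
static bool isCandidateLoad(const MachineInstr &MI) {
  if (MI.hasUnmodeledSideEffects())
    return false; // opaque to the pass; it is simply left in place
  return MI.mayLoad(); // ordinary loads remain eligible for hardening
}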
@@ -25872,7 +25872,7 @@ static SDValue emitLockedStackOp(SelectionDAG &DAG,
       DAG.getRegister(0, MVT::i16), // Segment.
       Zero,
       Chain};
-    SDNode *Res = DAG.getMachineNode(X86::LOCK_OR32mi8, DL, MVT::i32,
+    SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
                                      MVT::Other, Ops);
     return SDValue(Res, 1);
   }
@@ -25887,7 +25887,7 @@ static SDValue emitLockedStackOp(SelectionDAG &DAG,
     Zero,
     Chain
   };
-  SDNode *Res = DAG.getMachineNode(X86::LOCK_OR32mi8, DL, MVT::i32,
+  SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
                                    MVT::Other, Ops);
   return SDValue(Res, 1);
 }
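At the source level, the kind of operation that reaches emitLockedStackOp is an idempotent atomic read-modify-write whose result is unused, as in the atomicrmw test further down. A minimal C++ sketch of that usage (function and variable names are invented; lowering to the locked stack OR is the expected outcome here, not a guarantee):

// Hedged illustration, not from this commit: an idempotent seq_cst RMW whose
// result is ignored only matters for its ordering, so the backend may lower
// it to a locked OR of 0 against the thread's own stack (cheap and always
// dereferenceable) rather than touching *counter.
#include <atomic>

void fence_via_idempotent_rmw(std::atomic<int> &counter) {
  // fetch_or with 0 changes nothing; with the result unused it serves only
  // as a full barrier, which is what emitLockedStackOp emits above.
  (void)counter.fetch_or(0, std::memory_order_seq_cst);
}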
@@ -1719,11 +1719,9 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
 
       // If we have at least one (non-frame-index, non-RIP) register operand,
      // and neither operand is load-dependent, we need to check the load.
-      // Also handle explicit references to RSP as used by idempotent atomic
-      // or with 0.
       unsigned BaseReg = 0, IndexReg = 0;
       if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
-          BaseMO.getReg() != X86::RSP && BaseMO.getReg() != X86::NoRegister)
+          BaseMO.getReg() != X86::NoRegister)
         BaseReg = BaseMO.getReg();
       if (IndexMO.getReg() != X86::NoRegister)
         IndexReg = IndexMO.getReg();
@@ -1148,17 +1148,12 @@ entry:
 define void @idempotent_atomic(i32* %x) speculative_load_hardening {
 ; X64-LABEL: idempotent_atomic:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movq %rsp, %rax
-; X64-NEXT:    movq $-1, %rcx
 ; X64-NEXT:    sarq $63, %rax
-; X64-NEXT:    lock orl $0, -64(%rsp)
 ; X64-NEXT:    shlq $47, %rax
 ; X64-NEXT:    orq %rax, %rsp
+; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    retq
 ;
 ; X64-LFENCE-LABEL: idempotent_atomic:
 ; X64-LFENCE:       # %bb.0:
-; X64-LFENCE-NEXT:    lock orl $0, -64(%rsp)
+; X64-LFENCE-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-LFENCE-NEXT:    retq
   %tmp = atomicrmw or i32* %x, i32 0 seq_cst
   ret void