Revert r289955 and r289962. This is causing lots of ASAN failures for us.

Not sure whether it causes an ASAN false positive, whether it actually
leads to incorrect code, or whether it merely exposes existing bad code.
Hans, I'll get you instructions to reproduce this.

llvm-svn: 290066
Daniel Jasper 2016-12-18 14:36:38 +00:00
parent 4b88a770ef
commit 373f9a6a0c
2 changed files with 10 additions and 63 deletions
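
The reverted change taught combineSetCCAtomicArith to reuse the EFLAGS of a
LOCKed atomic add/sub for compares against a nonzero constant C, not just
against zero. At the source level, the kind of pattern affected looks like
this (an illustration of mine mirroring the deleted test test_sub_1_setcc_eq
below, not code from this commit):

#include <atomic>

// fetch_sub returns the value *before* the subtraction, so old == 1 means
// the counter just reached zero. With the reverted fold, the comparison
// reuses the flags of the LOCKed decrement, and the function compiles to
// `lock decq (%rdi); sete %al` (see the deleted CHECK lines below).
bool dec_reaches_zero(std::atomic<long> &Counter) {
  return Counter.fetch_sub(1, std::memory_order_seq_cst) == 1;
}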

lib/Target/X86/X86ISelLowering.cpp

@@ -28985,19 +28985,11 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-/// Combine brcond/cmov/setcc/.. based on comparing the result of
-/// atomic_load_add to use EFLAGS produced by the addition
-/// directly if possible. For example:
-///
-/// (setcc (cmp (atomic_load_add x, -C) C), COND_E)
-/// becomes:
-/// (setcc (LADD x, -C), COND_E)
-///
-/// and
+/// Combine:
 ///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
-/// becomes:
+/// to:
 ///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
-/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
+///
 /// Note that this is only legal for some op/cc combinations.
 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
                                        SelectionDAG &DAG) {
@@ -29006,7 +28998,7 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
     return SDValue();
 
-  // This applies to variations of the common case:
+  // This only applies to variations of the common case:
   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
   //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
@@ -29025,9 +29017,8 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
     return SDValue();
 
   auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
-  if (!CmpRHSC)
+  if (!CmpRHSC || CmpRHSC->getZExtValue() != 0)
     return SDValue();
-  APInt Comparand = CmpRHSC->getAPIntValue();
 
   const unsigned Opc = CmpLHS.getOpcode();
 
@@ -29043,19 +29034,16 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
   if (Opc == ISD::ATOMIC_LOAD_SUB)
     Addend = -Addend;
 
-  if (Comparand == -Addend) {
-    // No change to CC.
-  } else if (CC == X86::COND_S && Comparand == 0 && Addend == 1) {
+  if (CC == X86::COND_S && Addend == 1)
     CC = X86::COND_LE;
-  } else if (CC == X86::COND_NS && Comparand == 0 && Addend == 1) {
+  else if (CC == X86::COND_NS && Addend == 1)
     CC = X86::COND_G;
-  } else if (CC == X86::COND_G && Comparand == 0 && Addend == -1) {
+  else if (CC == X86::COND_G && Addend == -1)
     CC = X86::COND_GE;
-  } else if (CC == X86::COND_LE && Comparand == 0 && Addend == -1) {
+  else if (CC == X86::COND_LE && Addend == -1)
     CC = X86::COND_L;
-  } else {
+  else
     return SDValue();
-  }
 
   SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG);
   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
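
Worth noting about the hunk above: each condition-code rewrite is sound even
when the increment overflows, because COND_LE/COND_G read SF together with
OF. A small brute-force check over all 8-bit values (a sketch of mine, not
part of this commit; Flags, addFlags, condLE, and condG are made-up helpers)
verifies the two Addend == 1 mappings; the Addend == -1 cases check out the
same way.

#include <cassert>
#include <cstdint>

struct Flags { bool ZF, SF, OF; };

// Flags an 8-bit `add x, c` would set (what `lock incb`/`addb` produce).
static Flags addFlags(int8_t X, int8_t C) {
  auto R = static_cast<int8_t>(static_cast<uint8_t>(X) + static_cast<uint8_t>(C));
  // Signed overflow: operands share a sign that differs from the result's.
  bool OF = ((X < 0) == (C < 0)) && ((R < 0) != (X < 0));
  return {R == 0, R < 0, OF};
}

static bool condLE(Flags F) { return F.ZF || (F.SF != F.OF); } // x86 'le'
static bool condG(Flags F) { return !condLE(F); }              // x86 'g'

int main() {
  for (int V = -128; V <= 127; ++V) {
    int8_t X = static_cast<int8_t>(V);
    Flags F = addFlags(X, 1);
    assert((X < 0) == condLE(F));  // COND_S on x  -> COND_LE on x + 1
    assert((X >= 0) == condG(F));  // COND_NS on x -> COND_G  on x + 1
  }
}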

test/CodeGen/X86/atomic-eflags-reuse.ll

@@ -176,45 +176,4 @@ entry:
   ret i8 %tmp2
 }
 
-define i8 @test_sub_1_setcc_eq(i64* %p) #0 {
-; CHECK-LABEL: test_sub_1_setcc_eq:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    lock decq (%rdi)
-; CHECK-NEXT:    sete %al
-; CHECK-NEXT:    retq
-entry:
-  %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
-  %tmp1 = icmp eq i64 %tmp0, 1
-  %tmp2 = zext i1 %tmp1 to i8
-  ret i8 %tmp2
-}
-
-define i8 @test_add_5_setcc_ne(i64* %p) #0 {
-; CHECK-LABEL: test_add_5_setcc_ne:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    lock addq $5, (%rdi)
-; CHECK-NEXT:    setne %al
-; CHECK-NEXT:    retq
-entry:
-  %tmp0 = atomicrmw add i64* %p, i64 5 seq_cst
-  %tmp1 = icmp ne i64 %tmp0, -5
-  %tmp2 = zext i1 %tmp1 to i8
-  ret i8 %tmp2
-}
-
-define i8 @test_add_5_setcc_ne_comparand_mismatch(i64* %p) #0 {
-; CHECK-LABEL: test_add_5_setcc_ne_comparand_mismatch:
-; CHECK:       # BB#0: # %entry
-; CHECK-NEXT:    movl $5, %eax
-; CHECK-NEXT:    lock xaddq %rax, (%rdi)
-; CHECK-NEXT:    testq %rax, %rax
-; CHECK-NEXT:    setne %al
-; CHECK-NEXT:    retq
-entry:
-  %tmp0 = atomicrmw add i64* %p, i64 5 seq_cst
-  %tmp1 = icmp ne i64 %tmp0, 0
-  %tmp2 = zext i1 %tmp1 to i8
-  ret i8 %tmp2
-}
-
 attributes #0 = { nounwind }
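
The last two deleted tests contrast the foldable and non-foldable cases:
with atomicrmw add 5, an icmp ne against -5 asks whether the post-add value
is nonzero, so the flags of `lock addq $5` suffice, whereas an icmp ne
against 0 compares against a constant other than -Addend and keeps the
`xaddq` + `testq` sequence. Roughly, at the C++ level (my illustration with
hypothetical function names, not code from this commit):

#include <atomic>

// Foldable (mirrors test_add_5_setcc_ne): old != -5 <=> old + 5 != 0, so
// the ZF produced by `lock addq $5, (%rdi)` answers the question directly.
bool add5_moves_off_zero(std::atomic<long> &V) {
  return V.fetch_add(5, std::memory_order_seq_cst) != -5;
}

// Not foldable (mirrors test_add_5_setcc_ne_comparand_mismatch): the
// comparand 0 is not the negated addend, so the old value itself is needed
// and the compiler keeps `lock xaddq` + `testq`.
bool add5_old_was_nonzero(std::atomic<long> &V) {
  return V.fetch_add(5, std::memory_order_seq_cst) != 0;
}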