[X86] Don't run combineSetCCAtomicArith() when the cmp has multiple uses

We would miscompile the following:

  void g(int);
  int f(volatile long long *p) {
    bool b = __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST) < 0;
    g(b ? 12 : 34);
    return b ? 56 : 78;
  }

into

  pushq   %rax
  lock            incq    (%rdi)
  movl    $12, %eax
  movl    $34, %edi
  cmovlel %eax, %edi
  callq   g(int)
  testq   %rax, %rax   <---- Bad.
  movl    $56, %ecx
  movl    $78, %eax
  cmovsl  %ecx, %eax
  popq    %rcx
  retq

because the code failed to take into account that the cmp has multiple
uses, replaced one of them, and left the other one comparing garbage.
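
With the one-use check in place the combine is skipped, so the compare
survives for both users. For contrast, a sketch of what the fixed output
should look like, taken from the CHECK lines of the new test below (the
add is lowered as a value-preserving xadd and tested explicitly):

  movl    $1, %eax
  lock            xaddq   %rax, (%rdi)
  testq   %rax, %rax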

llvm-svn: 291630
Commit 12de693747 (parent 0b63b31b2e)
Author: Hans Wennborg, 2017-01-11 00:49:54 +00:00
2 changed files with 22 additions and 0 deletions

@@ -29404,6 +29404,12 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
     return SDValue();
 
+  // Can't replace the cmp if it has more uses than the one we're looking at.
+  // FIXME: We would like to be able to handle this, but would need to make sure
+  // all uses were updated.
+  if (!Cmp.hasOneUse())
+    return SDValue();
+
   // This only applies to variations of the common case:
   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)

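The FIXME above points at the less conservative alternative: rewrite every
user of the cmp rather than bailing out. A rough sketch, assuming the
SelectionDAG API used in this file (hypothetical, not part of this patch;
isFlagUserWeKnowHowToUpdate is an invented placeholder):

  // Instead of requiring a single use, check that *all* users of the cmp
  // can have their condition codes retargeted (e.g. SLT -> SLE, to account
  // for the +1 folded into the atomic op), and then update each of them.
  for (SDNode *User : Cmp->uses()) {
    // Both CMOVs in the example above are such users; missing one of them
    // is exactly the miscompile this patch guards against.
    if (!isFlagUserWeKnowHowToUpdate(User))
      return SDValue();
  }
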
@@ -176,4 +176,20 @@ entry:
   ret i8 %tmp2
 }
 
+define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
+; TODO: It's possible to use "lock inc" here, but both cmovs need to be updated.
+; CHECK-LABEL: test_add_1_cmov_cmov:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    movl $1, %eax
+; CHECK-NEXT:    lock xaddq %rax, (%rdi)
+; CHECK-NEXT:    testq %rax, %rax
+entry:
+  %add = atomicrmw add i64* %p, i64 1 seq_cst
+  %cmp = icmp slt i64 %add, 0
+  %s1 = select i1 %cmp, i8 12, i8 34
+  store i8 %s1, i8* %q
+  %s2 = select i1 %cmp, i8 56, i8 78
+  ret i8 %s2
+}
+
 attributes #0 = { nounwind }