[X86] Use X86 instead of X32 as a check prefix in atomic-idempotent.ll. NFC

X32 can refer to a 64-bit ABI that uses 32-bit ints, longs, and pointers.

I plan to add gnux32 command lines to this test so this prepares for that.

Also remove some check lines that have a prefix that is not in any run lines.

llvm-svn: 360642
This commit is contained in:
Craig Topper 2019-05-14 03:07:56 +00:00
parent 39084ce3ce
commit cc761e6fae
1 changed file with 145 additions and 149 deletions

View File

@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=X64
; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=X86
; On x86, an atomic rmw operation that does not modify the value in memory
; (such as atomic add 0) can be replaced by an mfence followed by a mov.
@@ -14,12 +14,12 @@ define i8 @add8(i8* %p) {
; X64-NEXT: movb (%rdi), %al
; X64-NEXT: retq
;
; X32-LABEL: add8:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: mfence
; X32-NEXT: movb (%eax), %al
; X32-NEXT: retl
; X86-LABEL: add8:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mfence
; X86-NEXT: movb (%eax), %al
; X86-NEXT: retl
%1 = atomicrmw add i8* %p, i8 0 monotonic
ret i8 %1
}
@@ -31,12 +31,12 @@ define i16 @or16(i16* %p) {
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: retq
;
; X32-LABEL: or16:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: mfence
; X32-NEXT: movzwl (%eax), %eax
; X32-NEXT: retl
; X86-LABEL: or16:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mfence
; X86-NEXT: movzwl (%eax), %eax
; X86-NEXT: retl
%1 = atomicrmw or i16* %p, i16 0 acquire
ret i16 %1
}
@@ -48,12 +48,12 @@ define i32 @xor32(i32* %p) {
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: retq
;
; X32-LABEL: xor32:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: mfence
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: retl
; X86-LABEL: xor32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mfence
; X86-NEXT: movl (%eax), %eax
; X86-NEXT: retl
%1 = atomicrmw xor i32* %p, i32 0 release
ret i32 %1
}
@@ -65,30 +65,30 @@ define i64 @sub64(i64* %p) {
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: retq
;
; X32-LABEL: sub64:
; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: pushl %esi
; X32-NEXT: .cfi_def_cfa_offset 12
; X32-NEXT: .cfi_offset %esi, -12
; X32-NEXT: .cfi_offset %ebx, -8
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB3_1: # %atomicrmw.start
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: movl %eax, %ebx
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB3_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
; X32-NEXT: popl %esi
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
; X86-LABEL: sub64:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: .cfi_offset %esi, -12
; X86-NEXT: .cfi_offset %ebx, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl (%esi), %eax
; X86-NEXT: movl 4(%esi), %edx
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB3_1: # %atomicrmw.start
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: lock cmpxchg8b (%esi)
; X86-NEXT: jne .LBB3_1
; X86-NEXT: # %bb.2: # %atomicrmw.end
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: popl %ebx
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
%1 = atomicrmw sub i64* %p, i64 0 seq_cst
ret i64 %1
}
@@ -105,44 +105,44 @@ define i128 @or128(i128* %p) {
; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
;
; X32-LABEL: or128:
; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: .cfi_def_cfa_register %ebp
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: andl $-8, %esp
; X32-NEXT: subl $16, %esp
; X32-NEXT: .cfi_offset %esi, -16
; X32-NEXT: .cfi_offset %edi, -12
; X32-NEXT: movl 8(%ebp), %esi
; X32-NEXT: movl %esp, %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl 12(%ebp)
; X32-NEXT: pushl %eax
; X32-NEXT: calll __sync_fetch_and_or_16
; X32-NEXT: addl $20, %esp
; X32-NEXT: movl (%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, 8(%esi)
; X32-NEXT: movl %edx, 12(%esi)
; X32-NEXT: movl %eax, (%esi)
; X32-NEXT: movl %ecx, 4(%esi)
; X32-NEXT: movl %esi, %eax
; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebp
; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl $4
; X86-LABEL: or128:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %ebp, -8
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: .cfi_def_cfa_register %ebp
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: .cfi_offset %esi, -16
; X86-NEXT: .cfi_offset %edi, -12
; X86-NEXT: movl 8(%ebp), %esi
; X86-NEXT: movl %esp, %eax
; X86-NEXT: pushl $0
; X86-NEXT: pushl $0
; X86-NEXT: pushl $0
; X86-NEXT: pushl $0
; X86-NEXT: pushl 12(%ebp)
; X86-NEXT: pushl %eax
; X86-NEXT: calll __sync_fetch_and_or_16
; X86-NEXT: addl $20, %esp
; X86-NEXT: movl (%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: leal -8(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebp
; X86-NEXT: .cfi_def_cfa %esp, 4
; X86-NEXT: retl $4
%1 = atomicrmw or i128* %p, i128 0 monotonic
ret i128 %1
}
@@ -155,12 +155,12 @@ define i32 @and32 (i32* %p) {
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: retq
;
; X32-LABEL: and32:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: mfence
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: retl
; X86-LABEL: and32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mfence
; X86-NEXT: movl (%eax), %eax
; X86-NEXT: retl
%1 = atomicrmw and i32* %p, i32 -1 acq_rel
ret i32 %1
}
@@ -208,10 +208,10 @@ define void @or32_nouse_seq_cst(i32* %p) {
; X64-NEXT: lock orl $0, (%rsp)
; X64-NEXT: retq
;
; X32-LABEL: or32_nouse_seq_cst:
; X32: # %bb.0:
; X32-NEXT: lock orl $0, (%esp)
; X32-NEXT: retl
; X86-LABEL: or32_nouse_seq_cst:
; X86: # %bb.0:
; X86-NEXT: lock orl $0, (%esp)
; X86-NEXT: retl
atomicrmw or i32* %p, i32 0 seq_cst
ret void
}
@@ -223,30 +223,30 @@ define void @or64_nouse_seq_cst(i64* %p) {
; X64-NEXT: lock orl $0, (%rsp)
; X64-NEXT: retq
;
; X32-LABEL: or64_nouse_seq_cst:
; X32: # %bb.0:
; X32-NEXT: pushl %ebx
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: pushl %esi
; X32-NEXT: .cfi_def_cfa_offset 12
; X32-NEXT: .cfi_offset %esi, -12
; X32-NEXT: .cfi_offset %ebx, -8
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl (%esi), %eax
; X32-NEXT: movl 4(%esi), %edx
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB11_1: # %atomicrmw.start
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: movl %eax, %ebx
; X32-NEXT: lock cmpxchg8b (%esi)
; X32-NEXT: jne .LBB11_1
; X32-NEXT: # %bb.2: # %atomicrmw.end
; X32-NEXT: popl %esi
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: popl %ebx
; X32-NEXT: .cfi_def_cfa_offset 4
; X32-NEXT: retl
; X86-LABEL: or64_nouse_seq_cst:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: .cfi_offset %esi, -12
; X86-NEXT: .cfi_offset %ebx, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl (%esi), %eax
; X86-NEXT: movl 4(%esi), %edx
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB11_1: # %atomicrmw.start
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: lock cmpxchg8b (%esi)
; X86-NEXT: jne .LBB11_1
; X86-NEXT: # %bb.2: # %atomicrmw.end
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: popl %ebx
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
atomicrmw or i64* %p, i64 0 seq_cst
ret void
}
@@ -264,32 +264,28 @@ define void @or128_nouse_seq_cst(i128* %p) {
; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: retq
;
; X32-LABEL: or128_nouse_seq_cst:
; X32: # %bb.0:
; X32-NEXT: pushl %ebp
; X32-NEXT: .cfi_def_cfa_offset 8
; X32-NEXT: .cfi_offset %ebp, -8
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: .cfi_def_cfa_register %ebp
; X32-NEXT: andl $-8, %esp
; X32-NEXT: subl $16, %esp
; X32-NEXT: movl %esp, %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl 8(%ebp)
; X32-NEXT: pushl %eax
; X32-NEXT: calll __sync_fetch_and_or_16
; X32-NEXT: addl $20, %esp
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popl %ebp
; X32-NEXT: .cfi_def_cfa %esp, 4
; X32-NEXT: retl
; X128-LABEL: or128_nouse_seq_cst:
; X128: # %bb.0:
; X128-NEXT: lock orl $0, -{{[0-9]+}}(%esp)
; X128-NEXT: retl
; X86-LABEL: or128_nouse_seq_cst:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %ebp, -8
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: .cfi_def_cfa_register %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl %esp, %eax
; X86-NEXT: pushl $0
; X86-NEXT: pushl $0
; X86-NEXT: pushl $0
; X86-NEXT: pushl $0
; X86-NEXT: pushl 8(%ebp)
; X86-NEXT: pushl %eax
; X86-NEXT: calll __sync_fetch_and_or_16
; X86-NEXT: addl $20, %esp
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: .cfi_def_cfa %esp, 4
; X86-NEXT: retl
atomicrmw or i128* %p, i128 0 seq_cst
ret void
}
@@ -301,10 +301,10 @@ define void @or16_nouse_seq_cst(i16* %p) {
; X64-NEXT: lock orl $0, (%rsp)
; X64-NEXT: retq
;
; X32-LABEL: or16_nouse_seq_cst:
; X32: # %bb.0:
; X32-NEXT: lock orl $0, (%esp)
; X32-NEXT: retl
; X86-LABEL: or16_nouse_seq_cst:
; X86: # %bb.0:
; X86-NEXT: lock orl $0, (%esp)
; X86-NEXT: retl
atomicrmw or i16* %p, i16 0 seq_cst
ret void
}
@@ -315,10 +315,10 @@ define void @or8_nouse_seq_cst(i8* %p) {
; X64-NEXT: lock orl $0, (%rsp)
; X64-NEXT: retq
;
; X32-LABEL: or8_nouse_seq_cst:
; X32: # %bb.0:
; X32-NEXT: lock orl $0, (%esp)
; X32-NEXT: retl
; X86-LABEL: or8_nouse_seq_cst:
; X86: # %bb.0:
; X86-NEXT: lock orl $0, (%esp)
; X86-NEXT: retl
atomicrmw or i8* %p, i8 0 seq_cst
ret void
}