; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=X86
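; Full-assembly checks for the lowering of llvm.umul.with.overflow.i128 on
; x86-64 and i686. On both targets the product and the overflow flag are
; computed inline (no libcall); the flag is returned as an i8 alongside the
; i128 product.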
define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 {
; X64-LABEL: muloti_test:
; X64: # %bb.0: # %start
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: testq %rcx, %rcx
; X64-NEXT: setne %al
; X64-NEXT: testq %rsi, %rsi
; X64-NEXT: setne %r9b
; X64-NEXT: andb %al, %r9b
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rax, %rsi
; X64-NEXT: seto %r10b
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: seto %r11b
; X64-NEXT: orb %r10b, %r11b
; X64-NEXT: addq %rsi, %rcx
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r8
; X64-NEXT: addq %rcx, %rdx
; X64-NEXT: setb %cl
; X64-NEXT: orb %r11b, %cl
; X64-NEXT: orb %r9b, %cl
; X64-NEXT: retq
;
; X86-LABEL: muloti_test:
; X86: # %bb.0: # %start
; X86-NEXT: pushl %ebp
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %ebx
; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: pushl %edi
; X86-NEXT: .cfi_def_cfa_offset 16
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 20
; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 48
; X86-NEXT: .cfi_offset %esi, -20
; X86-NEXT: .cfi_offset %edi, -16
; X86-NEXT: .cfi_offset %ebx, -12
; X86-NEXT: .cfi_offset %ebp, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: mull %ebx
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: seto {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: movl %esi, %eax
; X86-NEXT: mull %edi
; X86-NEXT: movl %eax, %esi
; X86-NEXT: seto {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: addl %ecx, %esi
; X86-NEXT: movl %edi, %eax
; X86-NEXT: mull %ebx
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: addl %esi, %ecx
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mull %ebp
; X86-NEXT: movl %eax, %edi
; X86-NEXT: seto {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mull %ebx
; X86-NEXT: movl %eax, %esi
; X86-NEXT: seto {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: addl %edi, %esi
; X86-NEXT: movl %ebx, %eax
; X86-NEXT: mull %ebp
; X86-NEXT: movl %ebp, %ebx
; X86-NEXT: movl %edx, %edi
; X86-NEXT: addl %esi, %edi
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: adcl %ecx, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %ebp, %eax
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: mull %ebx
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mull %ecx
; X86-NEXT: movl %edx, %esi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: addl %ebx, %ecx
; X86-NEXT: adcl $0, %esi
; X86-NEXT: movl %ebp, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: mull %ebp
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: adcl %esi, %ebx
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mull %ebp
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl %eax, %esi
; X86-NEXT: addl %ebx, %esi
; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X86-NEXT: adcl %eax, %ecx
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: mull %edx
; X86-NEXT: movl %edx, %ebp
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: mull %edx
; X86-NEXT: addl %ebx, %eax
; X86-NEXT: adcl %ebp, %edx
; X86-NEXT: addl %esi, %eax
; X86-NEXT: adcl %ecx, %edx
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: adcl %edi, %edx
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: setne %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: testl %esi, %esi
; X86-NEXT: setne %ch
; X86-NEXT: andb %cl, %ch
; X86-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
; X86-NEXT: orb %ch, %cl
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: setne %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: testl %edi, %edi
; X86-NEXT: setne %bh
; X86-NEXT: andb %cl, %bh
; X86-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: orl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: setne %bl
; X86-NEXT: orl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %esi, (%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %esi, 4(%ecx)
; X86-NEXT: movl %eax, 8(%ecx)
; X86-NEXT: movl %edx, 12(%ecx)
; X86-NEXT: setne %al
; X86-NEXT: andb %bl, %al
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Folded Reload
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Folded Reload
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Folded Reload
; X86-NEXT: orb %bh, %al
; X86-NEXT: andb $1, %al
; X86-NEXT: movb %al, 16(%ecx)
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 20
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 16
; X86-NEXT: popl %edi
; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: popl %ebx
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: popl %ebp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl $4
start:
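  ; Multiply the two i128 operands; the intrinsic's i1 overflow bit is widened
  ; to i8 so the pair can be returned as { i128, i8 }.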
%0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %l, i128 %r) #2
%1 = extractvalue { i128, i1 } %0, 0
%2 = extractvalue { i128, i1 } %0, 1
%3 = zext i1 %2 to i8
%4 = insertvalue { i128, i8 } undef, i128 %1, 0
%5 = insertvalue { i128, i8 } %4, i8 %3, 1
ret { i128, i8 } %5
}
; Function Attrs: nounwind readnone speculatable
declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128) #1
attributes #0 = { nounwind readnone uwtable }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { nounwind }