; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefixes=CHECK,X64

;
; Scalars
;

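; Variable-amount i128 shifts. The X64 lowering uses shrd/shr plus cmovs keyed
; on bit 6 of the amount; the X86 lowering assembles the four 32-bit result
; words with branches on the 32/64-bit crossings.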
define void @test_lshr_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
; X86-LABEL: test_lshr_i128:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $20, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %ebp, %esi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shrdl %cl, %edi, %esi
; X86-NEXT: shrl %cl, %edx
; X86-NEXT: shrl %cl, %edi
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: testb $32, %al
; X86-NEXT: jne .LBB0_1
; X86-NEXT: # %bb.2: # %entry
; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-NEXT: jmp .LBB0_3
; X86-NEXT: .LBB0_1:
; X86-NEXT: movl %edi, %esi
; X86-NEXT: movl $0, (%esp) # 4-byte Folded Spill
; X86-NEXT: xorl %edi, %edi
; X86-NEXT: .LBB0_3: # %entry
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, %edx
; X86-NEXT: subb $64, %dl
; X86-NEXT: jb .LBB0_5
; X86-NEXT: # %bb.4: # %entry
; X86-NEXT: xorl %edi, %edi
; X86-NEXT: .LBB0_5: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: negb %dl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: shldl %cl, %ebp, %edi
; X86-NEXT: movl %ebp, %esi
; X86-NEXT: shll %cl, %esi
; X86-NEXT: testb $32, %dl
; X86-NEXT: movl %esi, %ebx
; X86-NEXT: jne .LBB0_7
; X86-NEXT: # %bb.6: # %entry
; X86-NEXT: movl %edi, %ebx
; X86-NEXT: .LBB0_7: # %entry
; X86-NEXT: movb %al, %ah
; X86-NEXT: addb $-64, %ah
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movb %ah, %cl
; X86-NEXT: shrl %cl, %edi
; X86-NEXT: testb $32, %ah
; X86-NEXT: movl $0, %ecx
; X86-NEXT: jne .LBB0_9
; X86-NEXT: # %bb.8: # %entry
; X86-NEXT: movl %edi, %ecx
; X86-NEXT: .LBB0_9: # %entry
; X86-NEXT: cmpb $64, %al
; X86-NEXT: jb .LBB0_10
; X86-NEXT: # %bb.11: # %entry
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: jmp .LBB0_12
; X86-NEXT: .LBB0_10:
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NEXT: orl %ebx, %ecx
; X86-NEXT: .LBB0_12: # %entry
; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: testb $32, %dl
; X86-NEXT: jne .LBB0_14
; X86-NEXT: # %bb.13: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB0_14: # %entry
; X86-NEXT: movl %ebx, %edx
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shrdl %cl, %esi, %edx
; X86-NEXT: testb $32, %al
; X86-NEXT: jne .LBB0_16
; X86-NEXT: # %bb.15: # %entry
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB0_16: # %entry
; X86-NEXT: movb %ah, %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shrdl %cl, %edx, %ebp
; X86-NEXT: testb $32, %ah
; X86-NEXT: jne .LBB0_18
; X86-NEXT: # %bb.17: # %entry
; X86-NEXT: movl %ebp, %edi
; X86-NEXT: .LBB0_18: # %entry
; X86-NEXT: cmpb $64, %al
; X86-NEXT: jae .LBB0_20
; X86-NEXT: # %bb.19:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: .LBB0_20: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testb %al, %al
; X86-NEXT: je .LBB0_22
; X86-NEXT: # %bb.21: # %entry
; X86-NEXT: movl %edi, %ebx
; X86-NEXT: movl (%esp), %esi # 4-byte Reload
; X86-NEXT: .LBB0_22: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 12(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 8(%ecx)
; X86-NEXT: movl %esi, 4(%ecx)
; X86-NEXT: movl %ebx, (%ecx)
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: test_lshr_i128:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: shrdq %cl, %rsi, %rdi
; X64-NEXT: shrq %cl, %rsi
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testb $64, %cl
; X64-NEXT: cmovneq %rsi, %rdi
; X64-NEXT: cmoveq %rsi, %rax
; X64-NEXT: movq %rax, 8(%r8)
; X64-NEXT: movq %rdi, (%r8)
; X64-NEXT: retq
entry:
  %0 = lshr i128 %x, %a
  store i128 %0, i128* %r, align 16
  ret void
}

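; Arithmetic variant of the test above: the upper half is sign-filled
; (sarl $31 on X86, sarq $63 on X64) instead of zeroed.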
define void @test_ashr_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
; X86-LABEL: test_ashr_i128:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $24, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %ebp, %esi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shrdl %cl, %ebx, %esi
; X86-NEXT: shrl %cl, %edx
; X86-NEXT: movl %ebx, %edi
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: sarl $31, %ebx
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: testb $32, %al
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jne .LBB1_1
; X86-NEXT: # %bb.2: # %entry
; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-NEXT: jmp .LBB1_3
; X86-NEXT: .LBB1_1:
; X86-NEXT: movl %edi, %esi
; X86-NEXT: movl $0, (%esp) # 4-byte Folded Spill
; X86-NEXT: movl %ebx, %edi
; X86-NEXT: .LBB1_3: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, %edx
; X86-NEXT: subb $64, %dl
; X86-NEXT: jb .LBB1_5
; X86-NEXT: # %bb.4: # %entry
; X86-NEXT: movl %ebx, %edi
; X86-NEXT: .LBB1_5: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: negb %dl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: shldl %cl, %ebp, %edi
; X86-NEXT: movl %ebp, %esi
; X86-NEXT: shll %cl, %esi
; X86-NEXT: testb $32, %dl
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: jne .LBB1_7
; X86-NEXT: # %bb.6: # %entry
; X86-NEXT: movl %edi, %ecx
; X86-NEXT: .LBB1_7: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movb %al, %ah
; X86-NEXT: addb $-64, %ah
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movb %ah, %cl
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: testb $32, %ah
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: jne .LBB1_9
; X86-NEXT: # %bb.8: # %entry
; X86-NEXT: movl %edi, %ecx
; X86-NEXT: .LBB1_9: # %entry
; X86-NEXT: cmpb $64, %al
; X86-NEXT: jb .LBB1_10
; X86-NEXT: # %bb.11: # %entry
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jmp .LBB1_12
; X86-NEXT: .LBB1_10:
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: .LBB1_12: # %entry
; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: testb $32, %dl
; X86-NEXT: jne .LBB1_14
; X86-NEXT: # %bb.13: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB1_14: # %entry
; X86-NEXT: movl %ebx, %edx
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shrdl %cl, %esi, %edx
; X86-NEXT: testb $32, %al
; X86-NEXT: jne .LBB1_16
; X86-NEXT: # %bb.15: # %entry
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB1_16: # %entry
; X86-NEXT: movb %ah, %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shrdl %cl, %edx, %ebp
; X86-NEXT: testb $32, %ah
; X86-NEXT: jne .LBB1_18
; X86-NEXT: # %bb.17: # %entry
; X86-NEXT: movl %ebp, %edi
; X86-NEXT: .LBB1_18: # %entry
; X86-NEXT: cmpb $64, %al
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: jae .LBB1_20
; X86-NEXT: # %bb.19:
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: .LBB1_20: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testb %al, %al
; X86-NEXT: je .LBB1_22
; X86-NEXT: # %bb.21: # %entry
; X86-NEXT: movl %edi, %ebx
; X86-NEXT: movl (%esp), %esi # 4-byte Reload
; X86-NEXT: .LBB1_22: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 12(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 8(%ecx)
; X86-NEXT: movl %esi, 4(%ecx)
; X86-NEXT: movl %ebx, (%ecx)
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: test_ashr_i128:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: shrdq %cl, %rsi, %rdi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: sarq %cl, %rax
; X64-NEXT: sarq $63, %rsi
; X64-NEXT: testb $64, %cl
; X64-NEXT: cmovneq %rax, %rdi
; X64-NEXT: cmoveq %rax, %rsi
; X64-NEXT: movq %rsi, 8(%r8)
; X64-NEXT: movq %rdi, (%r8)
; X64-NEXT: retq
entry:
  %0 = ashr i128 %x, %a
  store i128 %0, i128* %r, align 16
  ret void
}

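; Left-shift variant: mirrors the lshr lowering with shld/shll, zeroing the
; low half once the amount reaches 64.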
define void @test_shl_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
; X86-LABEL: test_shl_i128:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $20, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %ebx
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll %cl, %ebx
; X86-NEXT: movl %ebp, %esi
; X86-NEXT: shll %cl, %esi
; X86-NEXT: movl %edi, %edx
; X86-NEXT: shldl %cl, %ebp, %edx
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: testb $32, %al
; X86-NEXT: jne .LBB2_1
; X86-NEXT: # %bb.2: # %entry
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X86-NEXT: jmp .LBB2_3
; X86-NEXT: .LBB2_1:
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl $0, (%esp) # 4-byte Folded Spill
; X86-NEXT: xorl %esi, %esi
; X86-NEXT: .LBB2_3: # %entry
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, %edx
; X86-NEXT: subb $64, %dl
; X86-NEXT: jb .LBB2_5
; X86-NEXT: # %bb.4: # %entry
; X86-NEXT: xorl %esi, %esi
; X86-NEXT: .LBB2_5: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: negb %dl
; X86-NEXT: movl %edi, %esi
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: shrl %cl, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: shrdl %cl, %edi, %ebx
; X86-NEXT: testb $32, %dl
; X86-NEXT: movl %esi, %ebp
; X86-NEXT: jne .LBB2_7
; X86-NEXT: # %bb.6: # %entry
; X86-NEXT: movl %ebx, %ebp
; X86-NEXT: .LBB2_7: # %entry
; X86-NEXT: movb %al, %ah
; X86-NEXT: addb $-64, %ah
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movb %ah, %cl
; X86-NEXT: shll %cl, %ebx
; X86-NEXT: testb $32, %ah
; X86-NEXT: movl $0, %ecx
; X86-NEXT: jne .LBB2_9
; X86-NEXT: # %bb.8: # %entry
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: .LBB2_9: # %entry
; X86-NEXT: cmpb $64, %al
; X86-NEXT: jb .LBB2_10
; X86-NEXT: # %bb.11: # %entry
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: jmp .LBB2_12
; X86-NEXT: .LBB2_10:
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NEXT: orl %ebp, %ecx
; X86-NEXT: .LBB2_12: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-NEXT: testb $32, %dl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: jne .LBB2_14
; X86-NEXT: # %bb.13: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB2_14: # %entry
; X86-NEXT: movl %edx, %esi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shldl %cl, %ebp, %esi
; X86-NEXT: testb $32, %al
; X86-NEXT: jne .LBB2_16
; X86-NEXT: # %bb.15: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB2_16: # %entry
; X86-NEXT: movb %ah, %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shldl %cl, %esi, %edi
; X86-NEXT: testb $32, %ah
; X86-NEXT: jne .LBB2_18
; X86-NEXT: # %bb.17: # %entry
; X86-NEXT: movl %edi, %ebx
; X86-NEXT: .LBB2_18: # %entry
; X86-NEXT: cmpb $64, %al
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: jae .LBB2_20
; X86-NEXT: # %bb.19:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT: .LBB2_20: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testb %al, %al
; X86-NEXT: je .LBB2_22
; X86-NEXT: # %bb.21: # %entry
; X86-NEXT: movl %ebx, %edx
; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
; X86-NEXT: .LBB2_22: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 4(%ecx)
; X86-NEXT: movl %esi, (%ecx)
; X86-NEXT: movl %edx, 12(%ecx)
; X86-NEXT: movl %ebp, 8(%ecx)
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: test_shl_i128:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: shldq %cl, %rdi, %rsi
; X64-NEXT: shlq %cl, %rdi
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: testb $64, %cl
; X64-NEXT: cmovneq %rdi, %rsi
; X64-NEXT: cmoveq %rdi, %rax
; X64-NEXT: movq %rsi, 8(%r8)
; X64-NEXT: movq %rax, (%r8)
; X64-NEXT: retq
entry:
  %0 = shl i128 %x, %a
  store i128 %0, i128* %r, align 16
  ret void
}

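; A constant shift amount of -1 is not smaller than the bit width, so the
; result is undefined and the store folds away, leaving a bare return.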
define void @test_lshr_i128_outofrange(i128 %x, i128* nocapture %r) nounwind {
; CHECK-LABEL: test_lshr_i128_outofrange:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
  %0 = lshr i128 %x, -1
  store i128 %0, i128* %r, align 16
  ret void
}

define void @test_ashr_i128_outofrange(i128 %x, i128* nocapture %r) nounwind {
; CHECK-LABEL: test_ashr_i128_outofrange:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
  %0 = ashr i128 %x, -1
  store i128 %0, i128* %r, align 16
  ret void
}

define void @test_shl_i128_outofrange(i128 %x, i128* nocapture %r) nounwind {
; CHECK-LABEL: test_shl_i128_outofrange:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
  %0 = shl i128 %x, -1
  store i128 %0, i128* %r, align 16
  ret void
}

;
; Vectors
;

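; The <2 x i128> shifts legalize to two independent i128 shifts; the X64
; output below is the scalar pattern repeated per element.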
define void @test_lshr_v2i128(<2 x i128> %x, <2 x i128> %a, <2 x i128>* nocapture %r) nounwind {
; X86-LABEL: test_lshr_v2i128:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $68, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %ebx, %edi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shrl %cl, %edi
; X86-NEXT: movl %esi, %ebp
; X86-NEXT: shrl %cl, %ebp
; X86-NEXT: shrdl %cl, %esi, %edx
; X86-NEXT: testb $32, %al
; X86-NEXT: jne .LBB6_1
; X86-NEXT: # %bb.2: # %entry
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jmp .LBB6_3
; X86-NEXT: .LBB6_1:
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .LBB6_3: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shrdl %cl, %ebx, %esi
; X86-NEXT: testb $32, %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: jne .LBB6_5
; X86-NEXT: # %bb.4: # %entry
; X86-NEXT: movl %esi, %edi
; X86-NEXT: .LBB6_5: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: shrl %cl, %ebx
; X86-NEXT: shrl %cl, %ebp
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: subl $64, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: testb $32, %dl
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl $0, %ecx
; X86-NEXT: jne .LBB6_7
; X86-NEXT: # %bb.6: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: .LBB6_7: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: shrdl %cl, %ebp, %esi
; X86-NEXT: testb $32, %dl
; X86-NEXT: jne .LBB6_9
; X86-NEXT: # %bb.8: # %entry
; X86-NEXT: movl %esi, %ebx
; X86-NEXT: .LBB6_9: # %entry
; X86-NEXT: movl %edi, %esi
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shrl %cl, %ebp
; X86-NEXT: testb $32, %cl
; X86-NEXT: movl $0, %ecx
; X86-NEXT: jne .LBB6_11
; X86-NEXT: # %bb.10: # %entry
; X86-NEXT: movl %ebp, %ecx
; X86-NEXT: .LBB6_11: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movb $64, %cl
; X86-NEXT: subb %dl, %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: shldl %cl, %ebx, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, %edi
; X86-NEXT: shll %cl, %edi
; X86-NEXT: testb $32, %cl
; X86-NEXT: movb $64, %bl
; X86-NEXT: jne .LBB6_12
; X86-NEXT: # %bb.13: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jmp .LBB6_14
; X86-NEXT: .LBB6_12:
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .LBB6_14: # %entry
; X86-NEXT: movl %esi, %edi
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: shrdl %cl, %ebp, %esi
; X86-NEXT: testb $32, %dl
; X86-NEXT: jne .LBB6_16
; X86-NEXT: # %bb.15: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB6_16: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: subb %al, %bl
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: shll %cl, %ebp
; X86-NEXT: testb $32, %bl
; X86-NEXT: movl $0, %ecx
; X86-NEXT: jne .LBB6_18
; X86-NEXT: # %bb.17: # %entry
; X86-NEXT: movl %ebp, %ecx
; X86-NEXT: .LBB6_18: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subl $64, %ecx
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: setae %bh
; X86-NEXT: jb .LBB6_20
; X86-NEXT: # %bb.19: # %entry
; X86-NEXT: xorl %edi, %edi
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .LBB6_20: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shrdl %cl, %esi, %edi
; X86-NEXT: shrl %cl, %esi
; X86-NEXT: testb $32, %cl
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jne .LBB6_22
; X86-NEXT: # %bb.21: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB6_22: # %entry
; X86-NEXT: testb %bh, %bh
; X86-NEXT: jne .LBB6_24
; X86-NEXT: # %bb.23:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB6_24: # %entry
; X86-NEXT: testb $32, %cl
; X86-NEXT: movl $0, %ecx
; X86-NEXT: jne .LBB6_26
; X86-NEXT: # %bb.25: # %entry
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: .LBB6_26: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: shldl %cl, %edi, %esi
; X86-NEXT: testb $32, %bl
; X86-NEXT: jne .LBB6_28
; X86-NEXT: # %bb.27: # %entry
; X86-NEXT: movl %esi, %ebp
; X86-NEXT: .LBB6_28: # %entry
; X86-NEXT: testb %bh, %bh
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: jne .LBB6_30
; X86-NEXT: # %bb.29:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: orl %ebp, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB6_30: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: jne .LBB6_32
; X86-NEXT: # %bb.31: # %entry
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .LBB6_32: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: shrdl %cl, %ebp, %edi
; X86-NEXT: movl %edi, %ebp
; X86-NEXT: testb $32, %cl
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: je .LBB6_33
; X86-NEXT: # %bb.34: # %entry
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: jne .LBB6_35
; X86-NEXT: .LBB6_36: # %entry
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: je .LBB6_38
; X86-NEXT: .LBB6_37:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB6_38: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: je .LBB6_40
; X86-NEXT: # %bb.39: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: .LBB6_40: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl %edx, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: je .LBB6_42
; X86-NEXT: # %bb.41: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: .LBB6_42: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 28(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 24(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 12(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 8(%ecx)
; X86-NEXT: movl %esi, 20(%ecx)
; X86-NEXT: movl %eax, 16(%ecx)
; X86-NEXT: movl %ebx, 4(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, (%ecx)
; X86-NEXT: addl $68, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
; X86-NEXT: .LBB6_33: # %entry
; X86-NEXT: movl %ebp, %edi
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: je .LBB6_36
; X86-NEXT: .LBB6_35:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: jne .LBB6_37
; X86-NEXT: jmp .LBB6_38
;
; X64-LABEL: test_lshr_v2i128:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
; X64-NEXT: movb {{[0-9]+}}(%rsp), %r9b
; X64-NEXT: movl %r9d, %ecx
; X64-NEXT: shrdq %cl, %rax, %rdx
; X64-NEXT: movl %r8d, %ecx
; X64-NEXT: shrdq %cl, %rsi, %rdi
; X64-NEXT: shrq %cl, %rsi
; X64-NEXT: xorl %r11d, %r11d
; X64-NEXT: testb $64, %r8b
; X64-NEXT: cmovneq %rsi, %rdi
; X64-NEXT: cmovneq %r11, %rsi
; X64-NEXT: movl %r9d, %ecx
; X64-NEXT: shrq %cl, %rax
; X64-NEXT: testb $64, %r9b
; X64-NEXT: cmovneq %rax, %rdx
; X64-NEXT: cmovneq %r11, %rax
; X64-NEXT: movq %rax, 24(%r10)
; X64-NEXT: movq %rdx, 16(%r10)
; X64-NEXT: movq %rsi, 8(%r10)
; X64-NEXT: movq %rdi, (%r10)
; X64-NEXT: retq
entry:
  %0 = lshr <2 x i128> %x, %a
  store <2 x i128> %0, <2 x i128>* %r, align 16
  ret void
}

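; Per-element arithmetic shifts: the scalar ashr pattern (shrdq/sarq plus
; cmovs) duplicated for both elements.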
define void @test_ashr_v2i128(<2 x i128> %x, <2 x i128> %a, <2 x i128>* nocapture %r) nounwind {
; X86-LABEL: test_ashr_v2i128:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $80, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %ebp, %ebx
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: sarl %cl, %ebx
; X86-NEXT: movl %esi, %edi
; X86-NEXT: shrl %cl, %edi
; X86-NEXT: shrdl %cl, %esi, %edx
; X86-NEXT: sarl $31, %ebp
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: testb $32, %al
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jne .LBB7_1
; X86-NEXT: # %bb.2: # %entry
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jmp .LBB7_3
; X86-NEXT: .LBB7_1:
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_3: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shrdl %cl, %edx, %edi
; X86-NEXT: testb $32, %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: jne .LBB7_5
; X86-NEXT: # %bb.4: # %entry
; X86-NEXT: movl %edi, %ebx
; X86-NEXT: .LBB7_5: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %ebp
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shrl %cl, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sarl $31, %ebp
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: subl $64, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: testb $32, %dl
; X86-NEXT: movl $0, %esi
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebp, %ecx
; X86-NEXT: jne .LBB7_7
; X86-NEXT: # %bb.6: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %edi, %ecx
; X86-NEXT: .LBB7_7: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: shrdl %cl, %ebp, %esi
; X86-NEXT: testb $32, %dl
; X86-NEXT: jne .LBB7_9
; X86-NEXT: # %bb.8: # %entry
; X86-NEXT: movl %esi, %edi
; X86-NEXT: .LBB7_9: # %entry
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sarl %cl, %esi
; X86-NEXT: testb $32, %cl
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: jne .LBB7_11
; X86-NEXT: # %bb.10: # %entry
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: .LBB7_11: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movb $64, %cl
; X86-NEXT: subb %dl, %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %ebx, %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: shldl %cl, %ebx, %ebp
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, %ebp
; X86-NEXT: shll %cl, %ebp
; X86-NEXT: testb $32, %cl
; X86-NEXT: movb $64, %bl
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: je .LBB7_13
; X86-NEXT: # %bb.12:
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: xorl %ebp, %ebp
; X86-NEXT: .LBB7_13: # %entry
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: shrdl %cl, %edi, %esi
; X86-NEXT: testb $32, %dl
; X86-NEXT: jne .LBB7_15
; X86-NEXT: # %bb.14: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_15: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: subb %al, %bl
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: shll %cl, %ebp
; X86-NEXT: testb $32, %bl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: jne .LBB7_17
; X86-NEXT: # %bb.16: # %entry
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_17: # %entry
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subl $64, %ecx
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: setae %bh
; X86-NEXT: jb .LBB7_19
; X86-NEXT: # %bb.18: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_19: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shrdl %cl, %edi, %esi
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: testb $32, %cl
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: je .LBB7_20
; X86-NEXT: # %bb.21: # %entry
; X86-NEXT: testb %bh, %bh
; X86-NEXT: je .LBB7_22
; X86-NEXT: .LBB7_23: # %entry
; X86-NEXT: testb $32, %cl
; X86-NEXT: jne .LBB7_25
; X86-NEXT: .LBB7_24: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_25: # %entry
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shldl %cl, %esi, %edi
; X86-NEXT: testb $32, %bl
; X86-NEXT: jne .LBB7_27
; X86-NEXT: # %bb.26: # %entry
; X86-NEXT: movl %edi, %ebp
; X86-NEXT: .LBB7_27: # %entry
; X86-NEXT: testb %bh, %bh
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: jne .LBB7_29
; X86-NEXT: # %bb.28:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: orl %ebp, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_29: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: jne .LBB7_31
; X86-NEXT: # %bb.30: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_31: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: shrdl %cl, %ebp, %ebx
; X86-NEXT: testb $32, %cl
; X86-NEXT: jne .LBB7_33
; X86-NEXT: # %bb.32: # %entry
; X86-NEXT: movl %ebx, %esi
; X86-NEXT: .LBB7_33: # %entry
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: je .LBB7_35
; X86-NEXT: # %bb.34:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: orl %ebx, %ecx
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: .LBB7_35: # %entry
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: je .LBB7_37
; X86-NEXT: # %bb.36:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_37: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: je .LBB7_39
; X86-NEXT: # %bb.38: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB7_39: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl %edx, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: je .LBB7_41
; X86-NEXT: # %bb.40: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: .LBB7_41: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 28(%ecx)
; X86-NEXT: movl %edi, 24(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 12(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 8(%ecx)
; X86-NEXT: movl %esi, 20(%ecx)
; X86-NEXT: movl %eax, 16(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, 4(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, (%ecx)
; X86-NEXT: addl $80, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
; X86-NEXT: .LBB7_20: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: testb %bh, %bh
; X86-NEXT: jne .LBB7_23
; X86-NEXT: .LBB7_22:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: testb $32, %cl
; X86-NEXT: je .LBB7_24
; X86-NEXT: jmp .LBB7_25
;
; X64-LABEL: test_ashr_v2i128:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
; X64-NEXT: movb {{[0-9]+}}(%rsp), %r9b
; X64-NEXT: movl %r9d, %ecx
; X64-NEXT: shrdq %cl, %r11, %rdx
; X64-NEXT: movl %r8d, %ecx
; X64-NEXT: shrdq %cl, %rsi, %rdi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: sarq %cl, %rax
; X64-NEXT: sarq $63, %rsi
; X64-NEXT: testb $64, %r8b
; X64-NEXT: cmovneq %rax, %rdi
; X64-NEXT: cmoveq %rax, %rsi
; X64-NEXT: movq %r11, %rax
; X64-NEXT: movl %r9d, %ecx
; X64-NEXT: sarq %cl, %rax
; X64-NEXT: sarq $63, %r11
; X64-NEXT: testb $64, %r9b
; X64-NEXT: cmovneq %rax, %rdx
; X64-NEXT: cmoveq %rax, %r11
; X64-NEXT: movq %r11, 24(%r10)
; X64-NEXT: movq %rdx, 16(%r10)
; X64-NEXT: movq %rsi, 8(%r10)
; X64-NEXT: movq %rdi, (%r10)
; X64-NEXT: retq
entry:
  %0 = ashr <2 x i128> %x, %a
  store <2 x i128> %0, <2 x i128>* %r, align 16
  ret void
}

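; Per-element left shifts: the scalar shl pattern (shldq/shlq plus cmovs)
; duplicated for both elements.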
define void @test_shl_v2i128(<2 x i128> %x, <2 x i128> %a, <2 x i128>* nocapture %r) nounwind {
; X86-LABEL: test_shl_v2i128:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $72, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: shll %cl, %ebp
; X86-NEXT: movl %eax, %esi
; X86-NEXT: shll %cl, %esi
; X86-NEXT: movl %edx, %eax
; X86-NEXT: subl $64, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: testb $32, %bl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl $0, %eax
; X86-NEXT: movl $0, %ecx
; X86-NEXT: jne .LBB8_2
; X86-NEXT: # %bb.1: # %entry
; X86-NEXT: movl %esi, %eax
; X86-NEXT: movl %ebp, %ecx
; X86-NEXT: .LBB8_2: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, %eax
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: shldl %cl, %edi, %eax
; X86-NEXT: testb $32, %bl
; X86-NEXT: jne .LBB8_4
; X86-NEXT: # %bb.3: # %entry
; X86-NEXT: movl %eax, %esi
; X86-NEXT: .LBB8_4: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movb $64, %cl
; X86-NEXT: subb %bl, %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, %esi
; X86-NEXT: shrl %cl, %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shrdl %cl, %edi, %eax
; X86-NEXT: testb $32, %cl
; X86-NEXT: jne .LBB8_5
; X86-NEXT: # %bb.6: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: jmp .LBB8_7
; X86-NEXT: .LBB8_5:
; X86-NEXT: movl %esi, %eax
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .LBB8_7: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shldl %cl, %esi, %edi
; X86-NEXT: testb $32, %bl
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: jne .LBB8_9
; X86-NEXT: # %bb.8: # %entry
; X86-NEXT: movl %edi, %ebp
; X86-NEXT: .LBB8_9: # %entry
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %ebp
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: shll %cl, %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: shll %cl, %esi
; X86-NEXT: testb $32, %dl
; X86-NEXT: movl $0, %edi
; X86-NEXT: movl $0, %ecx
; X86-NEXT: jne .LBB8_11
; X86-NEXT: # %bb.10: # %entry
; X86-NEXT: movl %esi, %edi
; X86-NEXT: movl %ebp, %ecx
; X86-NEXT: .LBB8_11: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: shldl %cl, %ebx, %edi
; X86-NEXT: testb $32, %dl
; X86-NEXT: jne .LBB8_13
; X86-NEXT: # %bb.12: # %entry
; X86-NEXT: movl %edi, %ebp
; X86-NEXT: .LBB8_13: # %entry
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movb $64, %cl
; X86-NEXT: subb %dl, %cl
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: shrl %cl, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: testb $32, %cl
; X86-NEXT: movl $0, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: jne .LBB8_15
; X86-NEXT: # %bb.14: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: .LBB8_15: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: shldl %cl, %ebp, %edi
; X86-NEXT: testb $32, %dl
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: jne .LBB8_17
; X86-NEXT: # %bb.16: # %entry
; X86-NEXT: movl %edi, %esi
; X86-NEXT: .LBB8_17: # %entry
; X86-NEXT: orl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl %ebx, %eax
; X86-NEXT: subl $64, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %ebp, %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X86-NEXT: jb .LBB8_19
; X86-NEXT: # %bb.18: # %entry
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .LBB8_19: # %entry
; X86-NEXT: jb .LBB8_21
; X86-NEXT: # %bb.20: # %entry
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: .LBB8_21: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %ebp, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shll %cl, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: shldl %cl, %ebp, %edi
; X86-NEXT: testb $32, %cl
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: jne .LBB8_23
; X86-NEXT: # %bb.22: # %entry
; X86-NEXT: movl %edi, %ecx
; X86-NEXT: .LBB8_23: # %entry
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shll %cl, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: testb $32, %al
; X86-NEXT: movl $0, %edi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: jne .LBB8_25
; X86-NEXT: # %bb.24: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: .LBB8_25: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: jne .LBB8_27
; X86-NEXT: # %bb.26: # %entry
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB8_27: # %entry
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: shldl %cl, %edi, %esi
; X86-NEXT: testb $32, %al
; X86-NEXT: jne .LBB8_29
; X86-NEXT: # %bb.28: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB8_29: # %entry
; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: jne .LBB8_30
; X86-NEXT: # %bb.31: # %entry
; X86-NEXT: testb %al, %al
; X86-NEXT: je .LBB8_32
; X86-NEXT: .LBB8_33: # %entry
; X86-NEXT: testb $32, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: jne .LBB8_35
; X86-NEXT: .LBB8_34: # %entry
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB8_35: # %entry
; X86-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: shrdl %cl, %ebx, %esi
; X86-NEXT: testb $32, %cl
; X86-NEXT: jne .LBB8_37
; X86-NEXT: # %bb.36: # %entry
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB8_37: # %entry
; X86-NEXT: testb %al, %al
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: jne .LBB8_38
; X86-NEXT: # %bb.39: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: testb %al, %al
; X86-NEXT: jne .LBB8_41
; X86-NEXT: jmp .LBB8_42
; X86-NEXT: .LBB8_30:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: orl %ebp, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: testb %al, %al
; X86-NEXT: jne .LBB8_33
; X86-NEXT: .LBB8_32: # %entry
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: testb $32, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: je .LBB8_34
; X86-NEXT: jmp .LBB8_35
; X86-NEXT: .LBB8_38:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testb %al, %al
; X86-NEXT: je .LBB8_42
; X86-NEXT: .LBB8_41:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB8_42: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl %eax, %edx
; X86-NEXT: je .LBB8_44
; X86-NEXT: # %bb.43: # %entry
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: .LBB8_44: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
; X86-NEXT: orl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: orl %edx, %ebx
; X86-NEXT: je .LBB8_46
; X86-NEXT: # %bb.45: # %entry
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: .LBB8_46: # %entry
; X86-NEXT: movl %esi, 20(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 16(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, 4(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl %edx, (%eax)
; X86-NEXT: movl %edi, 28(%eax)
; X86-NEXT: movl %ecx, 24(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: addl $72, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: test_shl_v2i128:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
; X64-NEXT: movb {{[0-9]+}}(%rsp), %r9b
; X64-NEXT: movl %r9d, %ecx
; X64-NEXT: shldq %cl, %rdx, %rax
; X64-NEXT: movl %r8d, %ecx
; X64-NEXT: shldq %cl, %rdi, %rsi
; X64-NEXT: shlq %cl, %rdi
; X64-NEXT: xorl %r11d, %r11d
; X64-NEXT: testb $64, %r8b
; X64-NEXT: cmovneq %rdi, %rsi
; X64-NEXT: cmovneq %r11, %rdi
; X64-NEXT: movl %r9d, %ecx
; X64-NEXT: shlq %cl, %rdx
; X64-NEXT: testb $64, %r9b
; X64-NEXT: cmovneq %rdx, %rax
; X64-NEXT: cmovneq %r11, %rdx
; X64-NEXT: movq %rax, 24(%r10)
; X64-NEXT: movq %rdx, 16(%r10)
; X64-NEXT: movq %rsi, 8(%r10)
; X64-NEXT: movq %rdi, (%r10)
; X64-NEXT: retq
entry:
  %0 = shl <2 x i128> %x, %a
  store <2 x i128> %0, <2 x i128>* %r, align 16
  ret void
}

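; As in the scalar case, the out-of-range constant amounts make the stores
; fold away.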
define void @test_lshr_v2i128_outofrange(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
; CHECK-LABEL: test_lshr_v2i128_outofrange:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
  %0 = lshr <2 x i128> %x, <i128 -1, i128 -1>
  store <2 x i128> %0, <2 x i128>* %r, align 16
  ret void
}

define void @test_ashr_v2i128_outofrange(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
; CHECK-LABEL: test_ashr_v2i128_outofrange:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
  %0 = ashr <2 x i128> %x, <i128 -1, i128 -1>
  store <2 x i128> %0, <2 x i128>* %r, align 16
  ret void
}

define void @test_shl_v2i128_outofrange(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
; CHECK-LABEL: test_shl_v2i128_outofrange:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ret{{[l|q]}}
entry:
  %0 = shl <2 x i128> %x, <i128 -1, i128 -1>
  store <2 x i128> %0, <2 x i128>* %r, align 16
  ret void
}

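; A second, in-range shift of the already out-of-range result: the two
; amounts must not simply be summed, and the value collapses to all-zero
; stores.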
define void @test_lshr_v2i128_outofrange_sum(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
; X86-LABEL: test_lshr_v2i128_outofrange_sum:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $0, 28(%eax)
; X86-NEXT: movl $0, 24(%eax)
; X86-NEXT: movl $0, 20(%eax)
; X86-NEXT: movl $0, 16(%eax)
; X86-NEXT: movl $0, 12(%eax)
; X86-NEXT: movl $0, 8(%eax)
; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: test_lshr_v2i128_outofrange_sum:
; X64: # %bb.0: # %entry
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 16(%r8)
; X64-NEXT: movaps %xmm0, (%r8)
; X64-NEXT: retq
entry:
  %0 = lshr <2 x i128> %x, <i128 -1, i128 -1>
  %1 = lshr <2 x i128> %0, <i128 1, i128 1>
  store <2 x i128> %1, <2 x i128>* %r, align 16
  ret void
}

define void @test_ashr_v2i128_outofrange_sum(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
; X86-LABEL: test_ashr_v2i128_outofrange_sum:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $0, 28(%eax)
; X86-NEXT: movl $0, 24(%eax)
; X86-NEXT: movl $0, 20(%eax)
; X86-NEXT: movl $0, 16(%eax)
; X86-NEXT: movl $0, 12(%eax)
; X86-NEXT: movl $0, 8(%eax)
; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: test_ashr_v2i128_outofrange_sum:
; X64: # %bb.0: # %entry
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 16(%r8)
; X64-NEXT: movaps %xmm0, (%r8)
; X64-NEXT: retq
entry:
  %0 = ashr <2 x i128> %x, <i128 -1, i128 -1>
  %1 = ashr <2 x i128> %0, <i128 1, i128 1>
  store <2 x i128> %1, <2 x i128>* %r, align 16
  ret void
}

define void @test_shl_v2i128_outofrange_sum(<2 x i128> %x, <2 x i128>* nocapture %r) nounwind {
; X86-LABEL: test_shl_v2i128_outofrange_sum:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $0, 28(%eax)
; X86-NEXT: movl $0, 24(%eax)
; X86-NEXT: movl $0, 20(%eax)
; X86-NEXT: movl $0, 16(%eax)
; X86-NEXT: movl $0, 12(%eax)
; X86-NEXT: movl $0, 8(%eax)
; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: test_shl_v2i128_outofrange_sum:
; X64: # %bb.0: # %entry
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 16(%r8)
; X64-NEXT: movaps %xmm0, (%r8)
; X64-NEXT: retq
entry:
  %0 = shl <2 x i128> %x, <i128 -1, i128 -1>
  %1 = shl <2 x i128> %0, <i128 1, i128 1>
  store <2 x i128> %1, <2 x i128>* %r, align 16
  ret void
}

;
; Combines
;

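; Combine checks: the out-of-range inner shift makes the whole
; sext/zext-then-shift chain undefined, so each function reduces to zeroing
; the returned <2 x i256>.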
define <2 x i256> @shl_sext_shl_outofrange(<2 x i128> %a0) {
; X86-LABEL: shl_sext_shl_outofrange:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $0, 60(%eax)
; X86-NEXT: movl $0, 56(%eax)
; X86-NEXT: movl $0, 52(%eax)
; X86-NEXT: movl $0, 48(%eax)
; X86-NEXT: movl $0, 44(%eax)
; X86-NEXT: movl $0, 40(%eax)
; X86-NEXT: movl $0, 36(%eax)
; X86-NEXT: movl $0, 32(%eax)
; X86-NEXT: movl $0, 28(%eax)
; X86-NEXT: movl $0, 24(%eax)
; X86-NEXT: movl $0, 20(%eax)
; X86-NEXT: movl $0, 16(%eax)
; X86-NEXT: movl $0, 12(%eax)
; X86-NEXT: movl $0, 8(%eax)
; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl $4
;
; X64-LABEL: shl_sext_shl_outofrange:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 32(%rdi)
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
  %1 = shl <2 x i128> %a0, <i128 -1, i128 -1>
  %2 = sext <2 x i128> %1 to <2 x i256>
  %3 = shl <2 x i256> %2, <i256 128, i256 128>
  ret <2 x i256> %3
}

define <2 x i256> @shl_zext_shl_outofrange(<2 x i128> %a0) {
; X86-LABEL: shl_zext_shl_outofrange:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $0, 60(%eax)
; X86-NEXT: movl $0, 56(%eax)
; X86-NEXT: movl $0, 52(%eax)
; X86-NEXT: movl $0, 48(%eax)
; X86-NEXT: movl $0, 44(%eax)
; X86-NEXT: movl $0, 40(%eax)
; X86-NEXT: movl $0, 36(%eax)
; X86-NEXT: movl $0, 32(%eax)
; X86-NEXT: movl $0, 28(%eax)
; X86-NEXT: movl $0, 24(%eax)
; X86-NEXT: movl $0, 20(%eax)
; X86-NEXT: movl $0, 16(%eax)
; X86-NEXT: movl $0, 12(%eax)
; X86-NEXT: movl $0, 8(%eax)
; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl $4
;
; X64-LABEL: shl_zext_shl_outofrange:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 32(%rdi)
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
  %1 = shl <2 x i128> %a0, <i128 -1, i128 -1>
  %2 = zext <2 x i128> %1 to <2 x i256>
  %3 = shl <2 x i256> %2, <i256 128, i256 128>
  ret <2 x i256> %3
}

define <2 x i256> @shl_zext_lshr_outofrange(<2 x i128> %a0) {
; X86-LABEL: shl_zext_lshr_outofrange:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $0, 60(%eax)
; X86-NEXT: movl $0, 56(%eax)
; X86-NEXT: movl $0, 52(%eax)
; X86-NEXT: movl $0, 48(%eax)
; X86-NEXT: movl $0, 44(%eax)
; X86-NEXT: movl $0, 40(%eax)
; X86-NEXT: movl $0, 36(%eax)
; X86-NEXT: movl $0, 32(%eax)
; X86-NEXT: movl $0, 28(%eax)
; X86-NEXT: movl $0, 24(%eax)
; X86-NEXT: movl $0, 20(%eax)
; X86-NEXT: movl $0, 16(%eax)
; X86-NEXT: movl $0, 12(%eax)
; X86-NEXT: movl $0, 8(%eax)
; X86-NEXT: movl $0, 4(%eax)
; X86-NEXT: movl $0, (%eax)
; X86-NEXT: retl $4
;
; X64-LABEL: shl_zext_lshr_outofrange:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 32(%rdi)
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: retq
  %1 = lshr <2 x i128> %a0, <i128 -1, i128 -1>
  %2 = zext <2 x i128> %1 to <2 x i256>
  %3 = shl <2 x i256> %2, <i256 128, i256 128>
  ret <2 x i256> %3
}

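; shl by 1 followed by lshr by 1 only clears the top bit, so it combines into
; an AND with the 0x7FFF... mask seen in the movabsq/andl below.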
define i128 @lshr_shl_mask(i128 %a0) {
; X86-LABEL: lshr_shl_mask:
; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: .cfi_offset %esi, -12
; X86-NEXT: .cfi_offset %edi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl $2147483647, %edi # imm = 0x7FFFFFFF
; X86-NEXT: andl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: movl %esi, 8(%eax)
; X86-NEXT: movl %edx, 4(%eax)
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: popl %edi
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl $4
;
; X64-LABEL: lshr_shl_mask:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF
; X64-NEXT: andq %rsi, %rdx
; X64-NEXT: retq
  %1 = shl i128 %a0, 1
  %2 = lshr i128 %1, 1
  ret i128 %2
}