; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2

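; Scalar i8: the checks below expect non-XOP targets to expand the reversal
; into a per-bit shift/mask/or sequence in GPRs, while XOP targets do the
; whole reversal with a single VPPERM through a constant-pool selector.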
define i8 @test_bitreverse_i8(i8 %a) nounwind {
; SSE-LABEL: test_bitreverse_i8:
; SSE: # BB#0:
; SSE-NEXT: movb %dil, %al
; SSE-NEXT: shlb $7, %al
; SSE-NEXT: movb %dil, %cl
; SSE-NEXT: shlb $5, %cl
; SSE-NEXT: andb $64, %cl
; SSE-NEXT: movb %dil, %dl
; SSE-NEXT: shlb $3, %dl
; SSE-NEXT: andb $32, %dl
; SSE-NEXT: orb %cl, %dl
; SSE-NEXT: movb %dil, %cl
; SSE-NEXT: addb %cl, %cl
; SSE-NEXT: andb $16, %cl
; SSE-NEXT: orb %dl, %cl
; SSE-NEXT: movb %dil, %dl
; SSE-NEXT: shrb %dl
; SSE-NEXT: andb $8, %dl
; SSE-NEXT: orb %cl, %dl
; SSE-NEXT: movb %dil, %cl
; SSE-NEXT: shrb $3, %cl
; SSE-NEXT: andb $4, %cl
; SSE-NEXT: orb %dl, %cl
; SSE-NEXT: movb %dil, %dl
; SSE-NEXT: shrb $5, %dl
; SSE-NEXT: andb $2, %dl
; SSE-NEXT: orb %cl, %dl
; SSE-NEXT: shrb $7, %dil
; SSE-NEXT: orb %dl, %dil
; SSE-NEXT: orb %al, %dil
; SSE-NEXT: movb %dil, %al
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i8:
; AVX: # BB#0:
; AVX-NEXT: movb %dil, %al
; AVX-NEXT: shlb $7, %al
; AVX-NEXT: movb %dil, %cl
; AVX-NEXT: shlb $5, %cl
; AVX-NEXT: andb $64, %cl
; AVX-NEXT: movb %dil, %dl
; AVX-NEXT: shlb $3, %dl
; AVX-NEXT: andb $32, %dl
; AVX-NEXT: orb %cl, %dl
; AVX-NEXT: movb %dil, %cl
; AVX-NEXT: addb %cl, %cl
; AVX-NEXT: andb $16, %cl
; AVX-NEXT: orb %dl, %cl
; AVX-NEXT: movb %dil, %dl
; AVX-NEXT: shrb %dl
; AVX-NEXT: andb $8, %dl
; AVX-NEXT: orb %cl, %dl
; AVX-NEXT: movb %dil, %cl
; AVX-NEXT: shrb $3, %cl
; AVX-NEXT: andb $4, %cl
; AVX-NEXT: orb %dl, %cl
; AVX-NEXT: movb %dil, %dl
; AVX-NEXT: shrb $5, %dl
; AVX-NEXT: andb $2, %dl
; AVX-NEXT: orb %cl, %dl
; AVX-NEXT: shrb $7, %dil
; AVX-NEXT: orb %dl, %dil
; AVX-NEXT: orb %al, %dil
; AVX-NEXT: movb %dil, %al
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i8:
; XOP: # BB#0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vpextrb $0, %xmm0, %eax
; XOP-NEXT: retq
  %b = call i8 @llvm.bitreverse.i8(i8 %a)
  ret i8 %b
}

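; Scalar i16: the same per-bit shift/mask/or expansion on SSE and AVX;
; XOP reverses the bits with one VPPERM and moves the result back via vmovd.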
define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-LABEL: test_bitreverse_i16:
; SSE: # BB#0:
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: andl $32768, %ecx # imm = 0x8000
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: shll $15, %eax
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: andl $2, %edx
; SSE-NEXT: shll $13, %edx
; SSE-NEXT: leal (%rdx,%rax), %eax
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: andl $4, %edx
; SSE-NEXT: shll $11, %edx
; SSE-NEXT: orl %edx, %eax
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: andl $8, %edx
; SSE-NEXT: shll $9, %edx
; SSE-NEXT: orl %edx, %eax
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: andl $16, %edx
; SSE-NEXT: shll $7, %edx
; SSE-NEXT: orl %edx, %eax
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: andl $32, %edx
; SSE-NEXT: shll $5, %edx
; SSE-NEXT: orl %edx, %eax
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: andl $64, %edx
; SSE-NEXT: shll $3, %edx
; SSE-NEXT: leal (%rdi,%rdi), %esi
; SSE-NEXT: andl $256, %esi # imm = 0x100
; SSE-NEXT: orl %edx, %esi
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl %edx
; SSE-NEXT: andl $128, %edx
; SSE-NEXT: orl %esi, %edx
; SSE-NEXT: movl %edi, %esi
; SSE-NEXT: shrl $3, %esi
; SSE-NEXT: andl $64, %esi
; SSE-NEXT: orl %edx, %esi
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $5, %edx
; SSE-NEXT: andl $32, %edx
; SSE-NEXT: orl %esi, %edx
; SSE-NEXT: movl %edi, %esi
; SSE-NEXT: shrl $7, %esi
; SSE-NEXT: andl $16, %esi
; SSE-NEXT: orl %edx, %esi
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $9, %edx
; SSE-NEXT: andl $8, %edx
; SSE-NEXT: orl %esi, %edx
; SSE-NEXT: movl %edi, %esi
; SSE-NEXT: shrl $11, %esi
; SSE-NEXT: andl $4, %esi
; SSE-NEXT: orl %edx, %esi
; SSE-NEXT: shrl $13, %edi
; SSE-NEXT: andl $2, %edi
; SSE-NEXT: orl %esi, %edi
; SSE-NEXT: shrl $15, %ecx
; SSE-NEXT: orl %edi, %ecx
; SSE-NEXT: orl %ecx, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i16:
; AVX: # BB#0:
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: andl $32768, %ecx # imm = 0x8000
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: shll $15, %eax
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: andl $2, %edx
; AVX-NEXT: shll $13, %edx
; AVX-NEXT: leal (%rdx,%rax), %eax
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: andl $4, %edx
; AVX-NEXT: shll $11, %edx
; AVX-NEXT: orl %edx, %eax
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: andl $8, %edx
; AVX-NEXT: shll $9, %edx
; AVX-NEXT: orl %edx, %eax
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: andl $16, %edx
; AVX-NEXT: shll $7, %edx
; AVX-NEXT: orl %edx, %eax
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: andl $32, %edx
; AVX-NEXT: shll $5, %edx
; AVX-NEXT: orl %edx, %eax
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: andl $64, %edx
; AVX-NEXT: shll $3, %edx
; AVX-NEXT: leal (%rdi,%rdi), %esi
; AVX-NEXT: andl $256, %esi # imm = 0x100
; AVX-NEXT: orl %edx, %esi
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl %edx
; AVX-NEXT: andl $128, %edx
; AVX-NEXT: orl %esi, %edx
; AVX-NEXT: movl %edi, %esi
; AVX-NEXT: shrl $3, %esi
; AVX-NEXT: andl $64, %esi
; AVX-NEXT: orl %edx, %esi
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $5, %edx
; AVX-NEXT: andl $32, %edx
; AVX-NEXT: orl %esi, %edx
; AVX-NEXT: movl %edi, %esi
; AVX-NEXT: shrl $7, %esi
; AVX-NEXT: andl $16, %esi
; AVX-NEXT: orl %edx, %esi
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $9, %edx
; AVX-NEXT: andl $8, %edx
; AVX-NEXT: orl %esi, %edx
; AVX-NEXT: movl %edi, %esi
; AVX-NEXT: shrl $11, %esi
; AVX-NEXT: andl $4, %esi
; AVX-NEXT: orl %edx, %esi
; AVX-NEXT: shrl $13, %edi
; AVX-NEXT: andl $2, %edi
; AVX-NEXT: orl %esi, %edi
; AVX-NEXT: shrl $15, %ecx
; AVX-NEXT: orl %edi, %ecx
; AVX-NEXT: orl %ecx, %eax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i16:
; XOP: # BB#0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
; XOP-NEXT: retq
  %b = call i16 @llvm.bitreverse.i16(i16 %a)
  ret i16 %b
}

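; Scalar i32: per-bit expansion again (31 shift/mask/or steps) on SSE and
; AVX; XOP uses vmovd + VPPERM + vmovd.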
define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-LABEL: test_bitreverse_i32:
; SSE: # BB#0:
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: shll $31, %eax
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: andl $2, %ecx
; SSE-NEXT: shll $29, %ecx
; SSE-NEXT: leal (%rcx,%rax), %eax
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: andl $4, %ecx
; SSE-NEXT: shll $27, %ecx
; SSE-NEXT: orl %ecx, %eax
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: andl $8, %ecx
; SSE-NEXT: shll $25, %ecx
; SSE-NEXT: orl %ecx, %eax
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: andl $16, %ecx
; SSE-NEXT: shll $23, %ecx
; SSE-NEXT: orl %ecx, %eax
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: andl $32, %ecx
; SSE-NEXT: shll $21, %ecx
; SSE-NEXT: orl %ecx, %eax
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: andl $64, %ecx
; SSE-NEXT: shll $19, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shll $17, %edx
; SSE-NEXT: andl $16777216, %edx # imm = 0x1000000
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shll $15, %ecx
; SSE-NEXT: andl $8388608, %ecx # imm = 0x800000
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shll $13, %edx
; SSE-NEXT: andl $4194304, %edx # imm = 0x400000
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shll $11, %ecx
; SSE-NEXT: andl $2097152, %ecx # imm = 0x200000
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shll $9, %edx
; SSE-NEXT: andl $1048576, %edx # imm = 0x100000
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shll $7, %ecx
; SSE-NEXT: andl $524288, %ecx # imm = 0x80000
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shll $5, %edx
; SSE-NEXT: andl $262144, %edx # imm = 0x40000
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: leal (,%rdi,8), %ecx
; SSE-NEXT: andl $131072, %ecx # imm = 0x20000
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: leal (%rdi,%rdi), %edx
; SSE-NEXT: andl $65536, %edx # imm = 0x10000
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrl %ecx
; SSE-NEXT: andl $32768, %ecx # imm = 0x8000
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $3, %edx
; SSE-NEXT: andl $16384, %edx # imm = 0x4000
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrl $5, %ecx
; SSE-NEXT: andl $8192, %ecx # imm = 0x2000
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $7, %edx
; SSE-NEXT: andl $4096, %edx # imm = 0x1000
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrl $9, %ecx
; SSE-NEXT: andl $2048, %ecx # imm = 0x800
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $11, %edx
; SSE-NEXT: andl $1024, %edx # imm = 0x400
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrl $13, %ecx
; SSE-NEXT: andl $512, %ecx # imm = 0x200
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $15, %edx
; SSE-NEXT: andl $256, %edx # imm = 0x100
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrl $17, %ecx
; SSE-NEXT: andl $128, %ecx
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $19, %edx
; SSE-NEXT: andl $64, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrl $21, %ecx
; SSE-NEXT: andl $32, %ecx
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $23, %edx
; SSE-NEXT: andl $16, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrl $25, %ecx
; SSE-NEXT: andl $8, %ecx
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: movl %edi, %edx
; SSE-NEXT: shrl $27, %edx
; SSE-NEXT: andl $4, %edx
; SSE-NEXT: orl %ecx, %edx
; SSE-NEXT: movl %edi, %ecx
; SSE-NEXT: shrl $29, %ecx
; SSE-NEXT: andl $2, %ecx
; SSE-NEXT: orl %edx, %ecx
; SSE-NEXT: shrl $31, %edi
; SSE-NEXT: orl %ecx, %edi
; SSE-NEXT: orl %edi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i32:
; AVX: # BB#0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: shll $31, %eax
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: andl $2, %ecx
; AVX-NEXT: shll $29, %ecx
; AVX-NEXT: leal (%rcx,%rax), %eax
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: andl $4, %ecx
; AVX-NEXT: shll $27, %ecx
; AVX-NEXT: orl %ecx, %eax
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: andl $8, %ecx
; AVX-NEXT: shll $25, %ecx
; AVX-NEXT: orl %ecx, %eax
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: andl $16, %ecx
; AVX-NEXT: shll $23, %ecx
; AVX-NEXT: orl %ecx, %eax
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: andl $32, %ecx
; AVX-NEXT: shll $21, %ecx
; AVX-NEXT: orl %ecx, %eax
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: andl $64, %ecx
; AVX-NEXT: shll $19, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shll $17, %edx
; AVX-NEXT: andl $16777216, %edx # imm = 0x1000000
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shll $15, %ecx
; AVX-NEXT: andl $8388608, %ecx # imm = 0x800000
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shll $13, %edx
; AVX-NEXT: andl $4194304, %edx # imm = 0x400000
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shll $11, %ecx
; AVX-NEXT: andl $2097152, %ecx # imm = 0x200000
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shll $9, %edx
; AVX-NEXT: andl $1048576, %edx # imm = 0x100000
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shll $7, %ecx
; AVX-NEXT: andl $524288, %ecx # imm = 0x80000
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shll $5, %edx
; AVX-NEXT: andl $262144, %edx # imm = 0x40000
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: leal (,%rdi,8), %ecx
; AVX-NEXT: andl $131072, %ecx # imm = 0x20000
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: leal (%rdi,%rdi), %edx
; AVX-NEXT: andl $65536, %edx # imm = 0x10000
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrl %ecx
; AVX-NEXT: andl $32768, %ecx # imm = 0x8000
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $3, %edx
; AVX-NEXT: andl $16384, %edx # imm = 0x4000
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrl $5, %ecx
; AVX-NEXT: andl $8192, %ecx # imm = 0x2000
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $7, %edx
; AVX-NEXT: andl $4096, %edx # imm = 0x1000
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrl $9, %ecx
; AVX-NEXT: andl $2048, %ecx # imm = 0x800
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $11, %edx
; AVX-NEXT: andl $1024, %edx # imm = 0x400
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrl $13, %ecx
; AVX-NEXT: andl $512, %ecx # imm = 0x200
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $15, %edx
; AVX-NEXT: andl $256, %edx # imm = 0x100
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrl $17, %ecx
; AVX-NEXT: andl $128, %ecx
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $19, %edx
; AVX-NEXT: andl $64, %edx
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrl $21, %ecx
; AVX-NEXT: andl $32, %ecx
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $23, %edx
; AVX-NEXT: andl $16, %edx
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrl $25, %ecx
; AVX-NEXT: andl $8, %ecx
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: movl %edi, %edx
; AVX-NEXT: shrl $27, %edx
; AVX-NEXT: andl $4, %edx
; AVX-NEXT: orl %ecx, %edx
; AVX-NEXT: movl %edi, %ecx
; AVX-NEXT: shrl $29, %ecx
; AVX-NEXT: andl $2, %ecx
; AVX-NEXT: orl %edx, %ecx
; AVX-NEXT: shrl $31, %edi
; AVX-NEXT: orl %ecx, %edi
; AVX-NEXT: orl %edi, %eax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i32:
; XOP: # BB#0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
; XOP-NEXT: retq
  %b = call i32 @llvm.bitreverse.i32(i32 %a)
  ret i32 %b
}

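; Scalar i64: the same expansion on 64-bit GPRs, with movabsq for the one
; mask that does not fit in an imm32; XOP round-trips through an XMM
; register with vmovq + VPPERM + vmovq.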
define i64 @test_bitreverse_i64(i64 %a) nounwind {
; SSE-LABEL: test_bitreverse_i64:
; SSE: # BB#0:
; SSE-NEXT: leaq (%rdi,%rdi), %rax
; SSE-NEXT: movabsq $4294967296, %rcx # imm = 0x100000000
; SSE-NEXT: andq %rax, %rcx
; SSE-NEXT: movq %rdi, %rax
; SSE-NEXT: shlq $63, %rax
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $2, %rdx
; SSE-NEXT: shlq $61, %rdx
; SSE-NEXT: leaq (%rdx,%rax), %rax
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $4, %rdx
; SSE-NEXT: shlq $59, %rdx
; SSE-NEXT: orq %rdx, %rax
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $8, %rdx
; SSE-NEXT: shlq $57, %rdx
; SSE-NEXT: orq %rdx, %rax
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $16, %rdx
; SSE-NEXT: shlq $55, %rdx
; SSE-NEXT: orq %rdx, %rax
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $32, %rdx
; SSE-NEXT: shlq $53, %rdx
; SSE-NEXT: orq %rdx, %rax
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $64, %rdx
; SSE-NEXT: shlq $51, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $128, %rsi
; SSE-NEXT: shlq $49, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $256, %rdx # imm = 0x100
; SSE-NEXT: shlq $47, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $512, %rsi # imm = 0x200
; SSE-NEXT: shlq $45, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $1024, %rdx # imm = 0x400
; SSE-NEXT: shlq $43, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $2048, %rsi # imm = 0x800
; SSE-NEXT: shlq $41, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $4096, %rdx # imm = 0x1000
; SSE-NEXT: shlq $39, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $8192, %rsi # imm = 0x2000
; SSE-NEXT: shlq $37, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $16384, %rdx # imm = 0x4000
; SSE-NEXT: shlq $35, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $32768, %rsi # imm = 0x8000
; SSE-NEXT: shlq $33, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $65536, %rdx # imm = 0x10000
; SSE-NEXT: shlq $31, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $131072, %rsi # imm = 0x20000
; SSE-NEXT: shlq $29, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $262144, %rdx # imm = 0x40000
; SSE-NEXT: shlq $27, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $524288, %rsi # imm = 0x80000
; SSE-NEXT: shlq $25, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $1048576, %rdx # imm = 0x100000
; SSE-NEXT: shlq $23, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $2097152, %rsi # imm = 0x200000
; SSE-NEXT: shlq $21, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $4194304, %rdx # imm = 0x400000
; SSE-NEXT: shlq $19, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $8388608, %rsi # imm = 0x800000
; SSE-NEXT: shlq $17, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $16777216, %rdx # imm = 0x1000000
; SSE-NEXT: shlq $15, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $33554432, %rsi # imm = 0x2000000
; SSE-NEXT: shlq $13, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $67108864, %rdx # imm = 0x4000000
; SSE-NEXT: shlq $11, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $134217728, %rsi # imm = 0x8000000
; SSE-NEXT: shlq $9, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $268435456, %rdx # imm = 0x10000000
; SSE-NEXT: shlq $7, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: movq %rdi, %rsi
; SSE-NEXT: andq $536870912, %rsi # imm = 0x20000000
; SSE-NEXT: shlq $5, %rsi
; SSE-NEXT: orq %rdx, %rsi
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: andq $1073741824, %rdx # imm = 0x40000000
; SSE-NEXT: shlq $3, %rdx
; SSE-NEXT: orq %rsi, %rdx
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq %rcx
; SSE-NEXT: andl $-2147483648, %ecx # imm = 0xFFFFFFFF80000000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $3, %rdx
; SSE-NEXT: andl $1073741824, %edx # imm = 0x40000000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $5, %rcx
; SSE-NEXT: andl $536870912, %ecx # imm = 0x20000000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $7, %rdx
; SSE-NEXT: andl $268435456, %edx # imm = 0x10000000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $9, %rcx
; SSE-NEXT: andl $134217728, %ecx # imm = 0x8000000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $11, %rdx
; SSE-NEXT: andl $67108864, %edx # imm = 0x4000000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $13, %rcx
; SSE-NEXT: andl $33554432, %ecx # imm = 0x2000000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $15, %rdx
; SSE-NEXT: andl $16777216, %edx # imm = 0x1000000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $17, %rcx
; SSE-NEXT: andl $8388608, %ecx # imm = 0x800000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $19, %rdx
; SSE-NEXT: andl $4194304, %edx # imm = 0x400000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $21, %rcx
; SSE-NEXT: andl $2097152, %ecx # imm = 0x200000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $23, %rdx
; SSE-NEXT: andl $1048576, %edx # imm = 0x100000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $25, %rcx
; SSE-NEXT: andl $524288, %ecx # imm = 0x80000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $27, %rdx
; SSE-NEXT: andl $262144, %edx # imm = 0x40000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $29, %rcx
; SSE-NEXT: andl $131072, %ecx # imm = 0x20000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $31, %rdx
; SSE-NEXT: andl $65536, %edx # imm = 0x10000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $33, %rcx
; SSE-NEXT: andl $32768, %ecx # imm = 0x8000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $35, %rdx
; SSE-NEXT: andl $16384, %edx # imm = 0x4000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $37, %rcx
; SSE-NEXT: andl $8192, %ecx # imm = 0x2000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $39, %rdx
; SSE-NEXT: andl $4096, %edx # imm = 0x1000
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $41, %rcx
; SSE-NEXT: andl $2048, %ecx # imm = 0x800
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $43, %rdx
; SSE-NEXT: andl $1024, %edx # imm = 0x400
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $45, %rcx
; SSE-NEXT: andl $512, %ecx # imm = 0x200
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $47, %rdx
; SSE-NEXT: andl $256, %edx # imm = 0x100
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $49, %rcx
; SSE-NEXT: andl $128, %ecx
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $51, %rdx
; SSE-NEXT: andl $64, %edx
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $53, %rcx
; SSE-NEXT: andl $32, %ecx
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $55, %rdx
; SSE-NEXT: andl $16, %edx
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $57, %rcx
; SSE-NEXT: andl $8, %ecx
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $59, %rdx
; SSE-NEXT: andl $4, %edx
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq $61, %rcx
; SSE-NEXT: andl $2, %ecx
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: shrq $63, %rdi
; SSE-NEXT: orq %rcx, %rdi
; SSE-NEXT: orq %rdi, %rax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i64:
; AVX: # BB#0:
; AVX-NEXT: leaq (%rdi,%rdi), %rax
; AVX-NEXT: movabsq $4294967296, %rcx # imm = 0x100000000
; AVX-NEXT: andq %rax, %rcx
; AVX-NEXT: movq %rdi, %rax
; AVX-NEXT: shlq $63, %rax
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $2, %rdx
; AVX-NEXT: shlq $61, %rdx
; AVX-NEXT: leaq (%rdx,%rax), %rax
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $4, %rdx
; AVX-NEXT: shlq $59, %rdx
; AVX-NEXT: orq %rdx, %rax
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $8, %rdx
; AVX-NEXT: shlq $57, %rdx
; AVX-NEXT: orq %rdx, %rax
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $16, %rdx
; AVX-NEXT: shlq $55, %rdx
; AVX-NEXT: orq %rdx, %rax
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $32, %rdx
; AVX-NEXT: shlq $53, %rdx
; AVX-NEXT: orq %rdx, %rax
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $64, %rdx
; AVX-NEXT: shlq $51, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $128, %rsi
; AVX-NEXT: shlq $49, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $256, %rdx # imm = 0x100
; AVX-NEXT: shlq $47, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $512, %rsi # imm = 0x200
; AVX-NEXT: shlq $45, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $1024, %rdx # imm = 0x400
; AVX-NEXT: shlq $43, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $2048, %rsi # imm = 0x800
; AVX-NEXT: shlq $41, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $4096, %rdx # imm = 0x1000
; AVX-NEXT: shlq $39, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $8192, %rsi # imm = 0x2000
; AVX-NEXT: shlq $37, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $16384, %rdx # imm = 0x4000
; AVX-NEXT: shlq $35, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $32768, %rsi # imm = 0x8000
; AVX-NEXT: shlq $33, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $65536, %rdx # imm = 0x10000
; AVX-NEXT: shlq $31, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $131072, %rsi # imm = 0x20000
; AVX-NEXT: shlq $29, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $262144, %rdx # imm = 0x40000
; AVX-NEXT: shlq $27, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $524288, %rsi # imm = 0x80000
; AVX-NEXT: shlq $25, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $1048576, %rdx # imm = 0x100000
; AVX-NEXT: shlq $23, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $2097152, %rsi # imm = 0x200000
; AVX-NEXT: shlq $21, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $4194304, %rdx # imm = 0x400000
; AVX-NEXT: shlq $19, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $8388608, %rsi # imm = 0x800000
; AVX-NEXT: shlq $17, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $16777216, %rdx # imm = 0x1000000
; AVX-NEXT: shlq $15, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $33554432, %rsi # imm = 0x2000000
; AVX-NEXT: shlq $13, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $67108864, %rdx # imm = 0x4000000
; AVX-NEXT: shlq $11, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $134217728, %rsi # imm = 0x8000000
; AVX-NEXT: shlq $9, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $268435456, %rdx # imm = 0x10000000
; AVX-NEXT: shlq $7, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: movq %rdi, %rsi
; AVX-NEXT: andq $536870912, %rsi # imm = 0x20000000
; AVX-NEXT: shlq $5, %rsi
; AVX-NEXT: orq %rdx, %rsi
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: andq $1073741824, %rdx # imm = 0x40000000
; AVX-NEXT: shlq $3, %rdx
; AVX-NEXT: orq %rsi, %rdx
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq %rcx
; AVX-NEXT: andl $-2147483648, %ecx # imm = 0xFFFFFFFF80000000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $3, %rdx
; AVX-NEXT: andl $1073741824, %edx # imm = 0x40000000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $5, %rcx
; AVX-NEXT: andl $536870912, %ecx # imm = 0x20000000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $7, %rdx
; AVX-NEXT: andl $268435456, %edx # imm = 0x10000000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $9, %rcx
; AVX-NEXT: andl $134217728, %ecx # imm = 0x8000000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $11, %rdx
; AVX-NEXT: andl $67108864, %edx # imm = 0x4000000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $13, %rcx
; AVX-NEXT: andl $33554432, %ecx # imm = 0x2000000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $15, %rdx
; AVX-NEXT: andl $16777216, %edx # imm = 0x1000000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $17, %rcx
; AVX-NEXT: andl $8388608, %ecx # imm = 0x800000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $19, %rdx
; AVX-NEXT: andl $4194304, %edx # imm = 0x400000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $21, %rcx
; AVX-NEXT: andl $2097152, %ecx # imm = 0x200000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $23, %rdx
; AVX-NEXT: andl $1048576, %edx # imm = 0x100000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $25, %rcx
; AVX-NEXT: andl $524288, %ecx # imm = 0x80000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $27, %rdx
; AVX-NEXT: andl $262144, %edx # imm = 0x40000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $29, %rcx
; AVX-NEXT: andl $131072, %ecx # imm = 0x20000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $31, %rdx
; AVX-NEXT: andl $65536, %edx # imm = 0x10000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $33, %rcx
; AVX-NEXT: andl $32768, %ecx # imm = 0x8000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $35, %rdx
; AVX-NEXT: andl $16384, %edx # imm = 0x4000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $37, %rcx
; AVX-NEXT: andl $8192, %ecx # imm = 0x2000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $39, %rdx
; AVX-NEXT: andl $4096, %edx # imm = 0x1000
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $41, %rcx
; AVX-NEXT: andl $2048, %ecx # imm = 0x800
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $43, %rdx
; AVX-NEXT: andl $1024, %edx # imm = 0x400
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $45, %rcx
; AVX-NEXT: andl $512, %ecx # imm = 0x200
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $47, %rdx
; AVX-NEXT: andl $256, %edx # imm = 0x100
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $49, %rcx
; AVX-NEXT: andl $128, %ecx
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $51, %rdx
; AVX-NEXT: andl $64, %edx
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $53, %rcx
; AVX-NEXT: andl $32, %ecx
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $55, %rdx
; AVX-NEXT: andl $16, %edx
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $57, %rcx
; AVX-NEXT: andl $8, %ecx
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $59, %rdx
; AVX-NEXT: andl $4, %edx
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq $61, %rcx
; AVX-NEXT: andl $2, %ecx
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: shrq $63, %rdi
; AVX-NEXT: orq %rcx, %rdi
; AVX-NEXT: orq %rdi, %rax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i64:
; XOP: # BB#0:
; XOP-NEXT: vmovq %rdi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovq %xmm0, %rax
; XOP-NEXT: retq
  %b = call i64 @llvm.bitreverse.i64(i64 %a)
  ret i64 %b
}

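; Vector v16i8: SSE/AVX build the reversal from psllw/psrlw stages plus
; byte masks loaded from the constant pool; XOP needs only a single VPPERM.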
define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: test_bitreverse_v16i8:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlw $7, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE-NEXT: pand %xmm1, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllw $7, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pand %xmm3, %xmm3
; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllw $5, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psllw $3, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: paddb %xmm3, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrlw $1, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlw $3, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: psrlw $5, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v16i8:
; AVX: # BB#0:
; AVX-NEXT: vpsrlw $7, %xmm0, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX-NEXT: vpand %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpsllw $7, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX-NEXT: vpand %xmm3, %xmm3, %xmm3
; AVX-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllw $5, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpsllw $3, %xmm0, %xmm4
; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm4
; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm4
; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsrlw $3, %xmm0, %xmm4
; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm3, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
  %b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
  ret <16 x i8> %b
}

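; Vector v8i16: per-bit expansion with sixteen psllw/psrlw stages, each
; shift paired with a constant-pool mask; XOP again folds the whole
; reversal into one VPPERM.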
define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: test_bitreverse_v8i16:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllw $13, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psllw $15, %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllw $11, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllw $9, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllw $7, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllw $5, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllw $3, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllw $1, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlw $1, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlw $3, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlw $5, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlw $7, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlw $9, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlw $11, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlw $13, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: psrlw $15, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v8i16:
; AVX: # BB#0:
; AVX-NEXT: vpsllw $13, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm2
; AVX-NEXT: vpsllw $15, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsllw $11, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllw $9, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllw $7, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllw $5, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllw $3, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllw $1, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlw $3, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlw $5, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlw $9, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlw $11, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlw $13, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
  %b = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a)
  ret <8 x i16> %b
}

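; Vector v4i32: AVX1 loads each mask from memory, while AVX2 materializes
; it with vpbroadcastd, so the two get separate check prefixes here; XOP
; remains a single VPPERM.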
define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
|
|
; SSE-LABEL: test_bitreverse_v4i32:
|
|
; SSE: # BB#0:
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: pslld $29, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm1
|
|
; SSE-NEXT: pslld $31, %xmm1
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: pslld $27, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: pslld $25, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: pslld $23, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: pslld $21, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: pslld $19, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: pslld $17, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: pslld $15, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: pslld $13, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: pslld $11, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: pslld $9, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: pslld $7, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: pslld $5, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: pslld $3, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: pslld $1, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: psrld $1, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: psrld $3, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: psrld $5, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: psrld $7, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: psrld $9, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: psrld $11, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: psrld $13, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: psrld $15, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: psrld $17, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: psrld $19, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: psrld $21, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: psrld $23, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: psrld $25, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE-NEXT: psrld $27, %xmm2
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
|
|
; SSE-NEXT: por %xmm3, %xmm2
|
|
; SSE-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE-NEXT: psrld $29, %xmm3
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
|
|
; SSE-NEXT: por %xmm2, %xmm3
|
|
; SSE-NEXT: psrld $31, %xmm0
|
|
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
|
|
; SSE-NEXT: por %xmm3, %xmm0
|
|
; SSE-NEXT: por %xmm1, %xmm0
|
|
; SSE-NEXT: retq
|
|
;
|
|
; AVX1-LABEL: test_bitreverse_v4i32:
|
|
; AVX1: # BB#0:
|
|
; AVX1-NEXT: vpslld $29, %xmm0, %xmm1
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm2
|
|
; AVX1-NEXT: vpslld $31, %xmm0, %xmm1
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
|
|
; AVX1-NEXT: vpslld $27, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $25, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $23, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $21, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $19, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $17, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $15, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $13, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $11, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $9, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $7, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $5, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $3, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpslld $1, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $3, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $5, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $7, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $9, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $11, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $13, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $15, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $17, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $19, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $21, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $23, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $25, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $27, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $29, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
|
|
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
|
|
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
|
|
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: test_bitreverse_v4i32:
|
|
; AVX2: # BB#0:
|
|
; AVX2-NEXT: vpslld $29, %xmm0, %xmm1
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
|
|
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm2
|
|
; AVX2-NEXT: vpslld $31, %xmm0, %xmm1
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm3
|
|
; AVX2-NEXT: vpand %xmm3, %xmm1, %xmm1
|
|
; AVX2-NEXT: vpslld $27, %xmm0, %xmm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
|
|
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
|
|
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX2-NEXT: vpslld $25, %xmm0, %xmm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
|
|
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
|
|
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX2-NEXT: vpslld $23, %xmm0, %xmm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
|
|
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
|
|
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX2-NEXT: vpslld $21, %xmm0, %xmm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
|
|
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
|
|
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX2-NEXT: vpslld $19, %xmm0, %xmm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
|
|
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
|
|
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX2-NEXT: vpslld $17, %xmm0, %xmm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
|
|
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
|
|
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX2-NEXT: vpslld $15, %xmm0, %xmm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
|
|
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
|
|
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
|
|
; AVX2-NEXT: vpslld $13, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpslld $11, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpslld $9, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpslld $7, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpslld $5, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpslld $3, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpslld $1, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $1, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $3, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $5, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $7, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $9, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $11, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $13, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $15, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $17, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $19, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $21, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $23, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $25, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $27, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $29, %xmm0, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm3
; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
ret <4 x i32> %b
}

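; v2i64: without XOP's vpperm, SSE/AVX expand bitreverse bit-by-bit, one
; shift+mask+or step per bit of the 64-bit lanes.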
define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; SSE-LABEL: test_bitreverse_v2i64:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $61, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psllq $63, %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $59, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $57, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $55, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $53, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $51, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $49, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $47, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $45, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $43, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $41, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $39, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $37, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $35, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $33, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $31, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $29, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $27, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $25, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $23, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $21, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $19, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $17, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $15, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $13, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $11, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $9, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $7, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $5, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq $3, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllq $1, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $1, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $3, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $5, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $7, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $9, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $11, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $13, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $15, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $17, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $19, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $21, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $23, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $25, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $27, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $29, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $31, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $33, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $35, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $37, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $39, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $41, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $43, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $45, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $47, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $49, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $51, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $53, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $55, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $57, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $59, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlq $61, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: psrlq $63, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v2i64:
; AVX: # BB#0:
; AVX-NEXT: vpsllq $61, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm2
; AVX-NEXT: vpsllq $63, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsllq $59, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $57, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $55, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $53, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $51, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $49, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $47, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $45, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $43, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $41, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $39, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $37, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $35, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $33, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $31, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $29, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $27, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $25, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $23, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $21, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $19, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $17, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $15, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $13, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $11, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $9, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $7, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $5, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $3, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsllq $1, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $3, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $5, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $7, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $9, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $11, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $13, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $15, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $17, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $19, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $21, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $23, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $25, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $27, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $29, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $31, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $35, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $37, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $39, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $41, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $43, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $45, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $47, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $49, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $51, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $53, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $55, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $57, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $59, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $61, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpsrlq $63, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
ret <2 x i64> %b
}

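; v32i8: AVX2 applies the per-bit sequence to the full 256-bit register,
; AVX1 works on the two 128-bit halves, and XOP reverses each half with a
; single vpperm.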
define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSE-LABEL: test_bitreverse_v32i8:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psllw $5, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm9 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; SSE-NEXT: pand {{.*}}(%rip), %xmm9
; SSE-NEXT: pand %xmm9, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: psllw $7, %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pand %xmm10, %xmm10
; SSE-NEXT: pand %xmm10, %xmm5
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllw $3, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
; SSE-NEXT: pand {{.*}}(%rip), %xmm11
; SSE-NEXT: pand %xmm11, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: paddb %xmm2, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; SSE-NEXT: pand %xmm8, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrlw $1, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm12 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE-NEXT: pand {{.*}}(%rip), %xmm12
; SSE-NEXT: pand %xmm12, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrlw $3, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; SSE-NEXT: pand {{.*}}(%rip), %xmm6
; SSE-NEXT: pand %xmm6, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm0, %xmm7
; SSE-NEXT: psrlw $5, %xmm7
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: pand %xmm2, %xmm7
; SSE-NEXT: por %xmm4, %xmm7
; SSE-NEXT: psrlw $7, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE-NEXT: pand %xmm3, %xmm3
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: por %xmm7, %xmm0
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psllw $5, %xmm4
; SSE-NEXT: pand %xmm9, %xmm4
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: psllw $7, %xmm5
; SSE-NEXT: pand %xmm10, %xmm5
; SSE-NEXT: movdqa %xmm1, %xmm7
; SSE-NEXT: psllw $3, %xmm7
; SSE-NEXT: pand %xmm11, %xmm7
; SSE-NEXT: por %xmm4, %xmm7
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: paddb %xmm4, %xmm4
; SSE-NEXT: pand %xmm8, %xmm4
; SSE-NEXT: por %xmm7, %xmm4
; SSE-NEXT: movdqa %xmm1, %xmm7
; SSE-NEXT: psrlw $1, %xmm7
; SSE-NEXT: pand %xmm12, %xmm7
; SSE-NEXT: por %xmm4, %xmm7
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: psrlw $3, %xmm4
; SSE-NEXT: pand %xmm6, %xmm4
; SSE-NEXT: por %xmm7, %xmm4
; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: psrlw $5, %xmm6
; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: por %xmm4, %xmm6
; SSE-NEXT: psrlw $7, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: por %xmm6, %xmm1
; SSE-NEXT: por %xmm5, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [224,224,224,224,224,224,224,224,224,224,224,224,224,224,224,224]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $5, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm2, %ymm2
; AVX1-NEXT: vpsllw $7, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm2, %ymm3, %ymm2
; AVX1-NEXT: vpsllw $3, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $3, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $3, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $3, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $5, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $5, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v32i8:
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX2-NEXT: vpand %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX2-NEXT: vpand %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllw $5, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpsllw $3, %ymm0, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
; AVX2-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsrlw $5, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%b = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %a)
ret <32 x i8> %b
}

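; v16i16: per-bit expansion over 16-bit lanes using psllw/psrlw with the odd
; shift amounts 1-15 and one mask constant per bit.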
define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSE-LABEL: test_bitreverse_v16i16:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psllw $13, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psllw $11, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psllw $9, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psllw $7, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psllw $5, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm12 = [1024,1024,1024,1024,1024,1024,1024,1024]
; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psllw $3, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm13 = [512,512,512,512,512,512,512,512]
; SSE-NEXT: pand %xmm13, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psllw $1, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm14 = [256,256,256,256,256,256,256,256]
; SSE-NEXT: pand %xmm14, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlw $1, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm15 = [128,128,128,128,128,128,128,128]
; SSE-NEXT: pand %xmm15, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlw $3, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [64,64,64,64,64,64,64,64]
; SSE-NEXT: pand %xmm11, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlw $5, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm10 = [32,32,32,32,32,32,32,32]
; SSE-NEXT: pand %xmm10, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlw $7, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm9 = [16,16,16,16,16,16,16,16]
; SSE-NEXT: pand %xmm9, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrlw $9, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [8,8,8,8,8,8,8,8]
; SSE-NEXT: pand %xmm8, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlw $11, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [4,4,4,4,4,4,4,4]
; SSE-NEXT: pand %xmm7, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: psrlw $13, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [2,2,2,2,2,2,2,2]
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlw $15, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: psllw $15, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psllw $13, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: psllw $15, %xmm2
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psllw $11, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psllw $9, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psllw $7, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psllw $5, %xmm4
; SSE-NEXT: pand %xmm12, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psllw $3, %xmm0
; SSE-NEXT: pand %xmm13, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psllw $1, %xmm4
; SSE-NEXT: pand %xmm14, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pand %xmm15, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrlw $3, %xmm4
; SSE-NEXT: pand %xmm11, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrlw $5, %xmm0
; SSE-NEXT: pand %xmm10, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrlw $7, %xmm4
; SSE-NEXT: pand %xmm9, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrlw $9, %xmm0
; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrlw $11, %xmm4
; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrlw $13, %xmm0
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: psrlw $15, %xmm5
; SSE-NEXT: pand %xmm3, %xmm5
; SSE-NEXT: por %xmm0, %xmm5
; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vpsllw $13, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $13, %xmm1, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm2, %ymm2
; AVX1-NEXT: vpsllw $15, %xmm0, %xmm3
; AVX1-NEXT: vpsllw $15, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm2, %ymm3, %ymm2
; AVX1-NEXT: vpsllw $11, %xmm0, %xmm3
; AVX1-NEXT: vpsllw $11, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsllw $9, %xmm0, %xmm3
; AVX1-NEXT: vpsllw $9, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm3
; AVX1-NEXT: vpsllw $7, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsllw $5, %xmm0, %xmm3
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsllw $3, %xmm0, %xmm3
; AVX1-NEXT: vpsllw $3, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsllw $1, %xmm0, %xmm3
; AVX1-NEXT: vpsllw $1, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $3, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $3, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $5, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $5, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $9, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $9, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $11, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $11, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $13, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $13, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $15, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v16i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpsllw $13, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm2
; AVX2-NEXT: vpsllw $15, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsllw $11, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllw $9, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllw $5, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllw $3, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsllw $1, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $5, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $9, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $11, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $13, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
ret <16 x i16> %b
}

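; v8i32: same per-bit pattern over 32-bit lanes with pslld/psrld; the AVX2
; path materializes each mask with vpbroadcastd.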
define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSE-LABEL: test_bitreverse_v8i32:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $29, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pslld $27, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $25, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pslld $23, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $21, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pslld $19, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $17, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pslld $15, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $13, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pslld $11, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $9, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pslld $7, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $5, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pslld $3, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $1, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrld $1, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $3, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrld $5, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $7, %xmm2
; SSE-NEXT: pand {{.*}}(%rip), %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrld $9, %xmm3
; SSE-NEXT: pand {{.*}}(%rip), %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $11, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm15 = [1024,1024,1024,1024]
; SSE-NEXT: pand %xmm15, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrld $13, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm14 = [512,512,512,512]
; SSE-NEXT: pand %xmm14, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $15, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm13 = [256,256,256,256]
; SSE-NEXT: pand %xmm13, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrld $17, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm12 = [128,128,128,128]
; SSE-NEXT: pand %xmm12, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $19, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm11 = [64,64,64,64]
; SSE-NEXT: pand %xmm11, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrld $21, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm10 = [32,32,32,32]
; SSE-NEXT: pand %xmm10, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $23, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm9 = [16,16,16,16]
; SSE-NEXT: pand %xmm9, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: psrld $25, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [8,8,8,8]
; SSE-NEXT: pand %xmm8, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $27, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [4,4,4,4]
; SSE-NEXT: pand %xmm7, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: psrld $29, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [2,2,2,2]
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $31, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: pslld $31, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pslld $29, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: pslld $31, %xmm2
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: pslld $27, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pslld $25, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: pslld $23, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pslld $21, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: pslld $19, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pslld $17, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: pslld $15, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pslld $13, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: pslld $11, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pslld $9, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: pslld $7, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pslld $5, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: pslld $3, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pslld $1, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrld $1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrld $3, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrld $5, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrld $7, %xmm4
; SSE-NEXT: pand {{.*}}(%rip), %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrld $9, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrld $11, %xmm4
; SSE-NEXT: pand %xmm15, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrld $13, %xmm0
; SSE-NEXT: pand %xmm14, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrld $15, %xmm4
; SSE-NEXT: pand %xmm13, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrld $17, %xmm0
; SSE-NEXT: pand %xmm12, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrld $19, %xmm4
; SSE-NEXT: pand %xmm11, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrld $21, %xmm0
; SSE-NEXT: pand %xmm10, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrld $23, %xmm4
; SSE-NEXT: pand %xmm9, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrld $25, %xmm0
; SSE-NEXT: pand %xmm8, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: psrld $27, %xmm4
; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: psrld $29, %xmm0
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: psrld $31, %xmm5
; SSE-NEXT: pand %xmm3, %xmm5
; SSE-NEXT: por %xmm0, %xmm5
; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm5, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vpslld $29, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpslld $29, %xmm1, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm2, %ymm2
; AVX1-NEXT: vpslld $31, %xmm0, %xmm3
; AVX1-NEXT: vpslld $31, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm2, %ymm3, %ymm2
; AVX1-NEXT: vpslld $27, %xmm0, %xmm3
; AVX1-NEXT: vpslld $27, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $25, %xmm0, %xmm3
; AVX1-NEXT: vpslld $25, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $23, %xmm0, %xmm3
; AVX1-NEXT: vpslld $23, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $21, %xmm0, %xmm3
; AVX1-NEXT: vpslld $21, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $19, %xmm0, %xmm3
; AVX1-NEXT: vpslld $19, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $17, %xmm0, %xmm3
; AVX1-NEXT: vpslld $17, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $15, %xmm0, %xmm3
; AVX1-NEXT: vpslld $15, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $13, %xmm0, %xmm3
; AVX1-NEXT: vpslld $13, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $11, %xmm0, %xmm3
; AVX1-NEXT: vpslld $11, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $9, %xmm0, %xmm3
; AVX1-NEXT: vpslld $9, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $7, %xmm0, %xmm3
; AVX1-NEXT: vpslld $7, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $5, %xmm0, %xmm3
; AVX1-NEXT: vpslld $5, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $3, %xmm0, %xmm3
; AVX1-NEXT: vpslld $3, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpslld $1, %xmm0, %xmm3
; AVX1-NEXT: vpslld $1, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $3, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $3, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $5, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $5, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $7, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $7, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $9, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $9, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $11, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $11, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $13, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $13, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $15, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $15, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $17, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $17, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $19, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $19, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $21, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $21, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $23, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $23, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $25, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $25, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $27, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $27, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $29, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $29, %xmm1, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v8i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpslld $29, %ymm0, %ymm1
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpslld $31, %ymm0, %ymm1
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm3
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpslld $27, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $25, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $23, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $21, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $19, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $17, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $15, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $13, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $11, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $9, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $7, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $5, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $3, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpslld $1, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $3, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $5, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $7, %ymm0, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $9, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $11, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $13, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $15, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $17, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $19, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $21, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $23, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $25, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $27, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $29, %ymm0, %ymm3
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
|
|
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
|
|
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
|
|
; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0
|
|
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm3
|
|
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
|
|
; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
|
|
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; XOPAVX1-LABEL: test_bitreverse_v8i32:
|
|
; XOPAVX1: # BB#0:
|
|
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
|
|
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
|
|
; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
|
|
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
|
|
; XOPAVX1-NEXT: retq
|
|
;
|
|
; XOPAVX2-LABEL: test_bitreverse_v8i32:
|
|
; XOPAVX2: # BB#0:
|
|
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
|
|
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
|
|
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
|
|
; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
|
|
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
|
|
; XOPAVX2-NEXT: retq
|
|
%b = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
|
|
ret <8 x i32> %b
|
|
}
|
|
|
|
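; Without a byte shuffle (SSSE3 pshufb / XOP vpperm), bitreverse of <4 x i64> is
; expanded bit-by-bit below: each of the 64 result bits per element is produced
; by one shift (psllq/psrlq), one single-bit mask (pand), and one accumulate
; (por), and the whole sequence then runs again for the second 128-bit half.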
define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSE-LABEL: test_bitreverse_v4i64:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm1, %xmm5
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $61, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $59, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $57, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $55, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $53, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $51, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $49, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $47, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $45, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $43, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $41, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $39, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $37, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $35, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $33, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $31, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $29, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $27, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $25, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $23, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $21, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $19, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $17, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $15, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $13, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $11, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $9, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $7, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $5, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $3, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $1, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $1, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $3, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $5, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $7, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $9, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $11, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $13, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $15, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $17, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $19, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $21, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $23, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $25, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $27, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $29, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $31, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $33, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $35, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $37, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $39, %xmm2
; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $41, %xmm3
; SSE-NEXT:    pand {{.*}}(%rip), %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $43, %xmm2
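; From this point on the single-bit mask constants are materialized into
; registers (%xmm15 down to %xmm6, plus %xmm3 and %xmm0) instead of being used
; straight from the constant pool, so the same masks can be reused further down
; for the second 128-bit half held in %xmm5.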
; SSE-NEXT:    movdqa {{.*#+}} xmm15 = [1024,1024]
; SSE-NEXT:    pand %xmm15, %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $45, %xmm3
; SSE-NEXT:    movdqa {{.*#+}} xmm14 = [512,512]
; SSE-NEXT:    pand %xmm14, %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $47, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm13 = [256,256]
; SSE-NEXT:    pand %xmm13, %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $49, %xmm3
; SSE-NEXT:    movdqa {{.*#+}} xmm12 = [128,128]
; SSE-NEXT:    pand %xmm12, %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $51, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm11 = [64,64]
; SSE-NEXT:    pand %xmm11, %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $53, %xmm3
; SSE-NEXT:    movdqa {{.*#+}} xmm10 = [32,32]
; SSE-NEXT:    pand %xmm10, %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $55, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm9 = [16,16]
; SSE-NEXT:    pand %xmm9, %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $57, %xmm3
; SSE-NEXT:    movdqa {{.*#+}} xmm8 = [8,8]
; SSE-NEXT:    pand %xmm8, %xmm3
; SSE-NEXT:    por %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $59, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm7 = [4,4]
; SSE-NEXT:    pand %xmm7, %xmm2
; SSE-NEXT:    por %xmm3, %xmm2
; SSE-NEXT:    psrlq $61, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm6 = [2,2]
; SSE-NEXT:    pand %xmm6, %xmm0
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $63, %xmm1
; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [1,1]
; SSE-NEXT:    pand %xmm3, %xmm1
; SSE-NEXT:    por %xmm0, %xmm1
; SSE-NEXT:    psllq $63, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
; SSE-NEXT:    pand %xmm0, %xmm2
; SSE-NEXT:    por %xmm2, %xmm1
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $61, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm2
; SSE-NEXT:    psllq $63, %xmm2
; SSE-NEXT:    pand %xmm0, %xmm2
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $59, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $57, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $55, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $53, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $51, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $49, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $47, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $45, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $43, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $41, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $39, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $37, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $35, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $33, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $31, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $29, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $27, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $25, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $23, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $21, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $19, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $17, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $15, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $13, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $11, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $9, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $7, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $5, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psllq $3, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psllq $1, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $1, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $3, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $5, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $7, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $9, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $11, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $13, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $15, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $17, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $19, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $21, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $23, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $25, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $27, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $29, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $31, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $33, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $35, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $37, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $39, %xmm4
; SSE-NEXT:    pand {{.*}}(%rip), %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $41, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $43, %xmm4
; SSE-NEXT:    pand %xmm15, %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $45, %xmm0
; SSE-NEXT:    pand %xmm14, %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $47, %xmm4
; SSE-NEXT:    pand %xmm13, %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $49, %xmm0
; SSE-NEXT:    pand %xmm12, %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $51, %xmm4
; SSE-NEXT:    pand %xmm11, %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $53, %xmm0
; SSE-NEXT:    pand %xmm10, %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $55, %xmm4
; SSE-NEXT:    pand %xmm9, %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $57, %xmm0
; SSE-NEXT:    pand %xmm8, %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm4
; SSE-NEXT:    psrlq $59, %xmm4
; SSE-NEXT:    pand %xmm7, %xmm4
; SSE-NEXT:    por %xmm0, %xmm4
; SSE-NEXT:    movdqa %xmm5, %xmm0
; SSE-NEXT:    psrlq $61, %xmm0
; SSE-NEXT:    pand %xmm6, %xmm0
; SSE-NEXT:    por %xmm4, %xmm0
; SSE-NEXT:    psrlq $63, %xmm5
; SSE-NEXT:    pand %xmm3, %xmm5
; SSE-NEXT:    por %xmm0, %xmm5
; SSE-NEXT:    por %xmm2, %xmm5
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    movdqa %xmm5, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: test_bitreverse_v4i64:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpsllq $61, %xmm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsllq $61, %xmm1, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $63, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $63, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm2, %ymm3, %ymm2
; AVX1-NEXT:    vpsllq $59, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $59, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $57, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $57, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $55, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $55, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $53, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $53, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $51, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $51, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $49, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $49, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $47, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $47, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $45, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $45, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $43, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $43, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $41, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $41, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $39, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $39, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $37, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $37, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $35, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $35, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $33, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $33, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $31, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $31, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $29, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $29, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $27, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $27, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $25, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $25, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $23, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $23, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $21, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $21, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $19, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $19, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $17, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $17, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $15, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $15, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $13, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $13, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $11, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $11, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $9, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $9, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $7, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $7, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $5, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $5, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $3, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $3, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsllq $1, %xmm0, %xmm3
; AVX1-NEXT:    vpsllq $1, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $3, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $3, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $5, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $5, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $7, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $7, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $9, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $9, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $11, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $11, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $13, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $13, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $15, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $15, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $17, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $17, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $19, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $19, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $21, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $21, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $23, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $23, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $25, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $25, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $27, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $27, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $29, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $29, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $31, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $31, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $33, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $35, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $35, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $37, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $37, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $39, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $39, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $41, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $41, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $43, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $43, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $45, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $45, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $47, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $47, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $49, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $49, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $51, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $51, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $53, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $53, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $55, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $55, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $57, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $57, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $59, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $59, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $61, %xmm0, %xmm3
; AVX1-NEXT:    vpsrlq $61, %xmm1, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm3, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm2, %ymm2
; AVX1-NEXT:    vpsrlq $63, %xmm0, %xmm0
; AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    vorps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_bitreverse_v4i64:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpsllq $61, %ymm0, %ymm1
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vpsllq $63, %ymm0, %ymm1
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm3
; AVX2-NEXT:    vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vpsllq $59, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $57, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $55, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $53, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $51, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $49, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $47, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $45, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $43, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $41, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $39, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $37, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $35, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $33, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $31, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $29, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $27, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $25, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $23, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $21, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $19, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $17, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $15, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $13, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $11, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $9, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $7, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $5, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $3, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsllq $1, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $1, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $3, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $5, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $7, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $9, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $11, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $13, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $15, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $17, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $19, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $21, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $23, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $25, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $27, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $29, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $31, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $33, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $35, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $37, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $39, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $41, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $43, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $45, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $47, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $49, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $51, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $53, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $55, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $57, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $59, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $61, %ymm0, %ymm3
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpsrlq $63, %ymm0, %ymm0
; AVX2-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm3
; AVX2-NEXT:    vpand %ymm3, %ymm0, %ymm0
; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
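; The XOP lowerings need only one vpperm per 128-bit half: each selector byte in
; the 0x50-0x5F range (80-95 above) appears to request a bit-reversed copy of
; the chosen source byte, so a single shuffle both reverses the byte order of
; each 64-bit element and the bit order within every byte.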
; XOPAVX1-LABEL: test_bitreverse_v4i64:
; XOPAVX1:       # BB#0:
; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX1-NEXT:    vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT:    vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT:    retq
;
; XOPAVX2-LABEL: test_bitreverse_v4i64:
; XOPAVX2:       # BB#0:
; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX2-NEXT:    vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT:    vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT:    retq
  %b = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
  ret <4 x i64> %b
}

declare i8 @llvm.bitreverse.i8(i8) readnone
declare i16 @llvm.bitreverse.i16(i16) readnone
declare i32 @llvm.bitreverse.i32(i32) readnone
declare i64 @llvm.bitreverse.i64(i64) readnone

declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>) readnone
declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>) readnone
declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) readnone
declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) readnone

declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>) readnone
declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>) readnone
declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>) readnone
declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) readnone