llvm-project/llvm/test/CodeGen/X86/vector-bitreverse.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
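
; These tests cover llc lowering of the llvm.bitreverse.* intrinsics for scalar
; (i8/i16/i32/i64) and vector types on SSE2, SSSE3, AVX, AVX2, AVX512F, AVX512BW
; and XOP subtargets. Per the autogenerated checks below, scalar bitreverse is
; expanded into a nibble/byte rotate or bswap followed by shift-and-mask swaps of
; the remaining bit groups, while XOP targets move the value into an XMM register
; and reverse it with a single vpperm.
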
define i8 @test_bitreverse_i8(i8 %a) nounwind {
; SSE-LABEL: test_bitreverse_i8:
; SSE: # BB#0:
; SSE-NEXT: rolb $4, %dil
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andb $51, %al
; SSE-NEXT: shlb $2, %al
; SSE-NEXT: andb $-52, %dil
; SSE-NEXT: shrb $2, %dil
; SSE-NEXT: orb %al, %dil
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andb $85, %al
; SSE-NEXT: addb %al, %al
; SSE-NEXT: andb $-86, %dil
; SSE-NEXT: shrb %dil
; SSE-NEXT: orb %al, %dil
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i8:
; AVX: # BB#0:
; AVX-NEXT: rolb $4, %dil
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andb $51, %al
; AVX-NEXT: shlb $2, %al
; AVX-NEXT: andb $-52, %dil
; AVX-NEXT: shrb $2, %dil
; AVX-NEXT: orb %al, %dil
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andb $85, %al
; AVX-NEXT: addb %al, %al
; AVX-NEXT: andb $-86, %dil
; AVX-NEXT: shrb %dil
; AVX-NEXT: orb %al, %dil
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i8:
; XOP: # BB#0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vpextrb $0, %xmm0, %eax
; XOP-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; XOP-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 %a)
ret i8 %b
}

define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-LABEL: test_bitreverse_i16:
; SSE: # BB#0:
; SSE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE-NEXT: rolw $8, %di
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $3855, %eax # imm = 0xF0F
; SSE-NEXT: shll $4, %eax
; SSE-NEXT: andl $61680, %edi # imm = 0xF0F0
; SSE-NEXT: shrl $4, %edi
; SSE-NEXT: orl %eax, %edi
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $13107, %eax # imm = 0x3333
; SSE-NEXT: andl $52428, %edi # imm = 0xCCCC
; SSE-NEXT: shrl $2, %edi
; SSE-NEXT: leal (%rdi,%rax,4), %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: andl $21845, %ecx # imm = 0x5555
; SSE-NEXT: andl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: shrl %eax
; SSE-NEXT: leal (%rax,%rcx,2), %eax
; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i16:
; AVX: # BB#0:
; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX-NEXT: rolw $8, %di
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $3855, %eax # imm = 0xF0F
; AVX-NEXT: shll $4, %eax
; AVX-NEXT: andl $61680, %edi # imm = 0xF0F0
; AVX-NEXT: shrl $4, %edi
; AVX-NEXT: orl %eax, %edi
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $13107, %eax # imm = 0x3333
; AVX-NEXT: andl $52428, %edi # imm = 0xCCCC
; AVX-NEXT: shrl $2, %edi
; AVX-NEXT: leal (%rdi,%rax,4), %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: andl $21845, %ecx # imm = 0x5555
; AVX-NEXT: andl $43690, %eax # imm = 0xAAAA
; AVX-NEXT: shrl %eax
; AVX-NEXT: leal (%rax,%rcx,2), %eax
; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i16:
; XOP: # BB#0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
; XOP-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; XOP-NEXT: retq
%b = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %b
}

define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-LABEL: test_bitreverse_i32:
; SSE: # BB#0:
; SSE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE-NEXT: bswapl %edi
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; SSE-NEXT: shll $4, %eax
; SSE-NEXT: andl $-252645136, %edi # imm = 0xF0F0F0F0
; SSE-NEXT: shrl $4, %edi
; SSE-NEXT: orl %eax, %edi
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $858993459, %eax # imm = 0x33333333
; SSE-NEXT: andl $-858993460, %edi # imm = 0xCCCCCCCC
; SSE-NEXT: shrl $2, %edi
; SSE-NEXT: leal (%rdi,%rax,4), %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: andl $1431655765, %ecx # imm = 0x55555555
; SSE-NEXT: andl $-1431655766, %eax # imm = 0xAAAAAAAA
; SSE-NEXT: shrl %eax
; SSE-NEXT: leal (%rax,%rcx,2), %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i32:
; AVX: # BB#0:
; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX-NEXT: bswapl %edi
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; AVX-NEXT: shll $4, %eax
; AVX-NEXT: andl $-252645136, %edi # imm = 0xF0F0F0F0
; AVX-NEXT: shrl $4, %edi
; AVX-NEXT: orl %eax, %edi
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $858993459, %eax # imm = 0x33333333
; AVX-NEXT: andl $-858993460, %edi # imm = 0xCCCCCCCC
; AVX-NEXT: shrl $2, %edi
; AVX-NEXT: leal (%rdi,%rax,4), %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: andl $1431655765, %ecx # imm = 0x55555555
; AVX-NEXT: andl $-1431655766, %eax # imm = 0xAAAAAAAA
; AVX-NEXT: shrl %eax
; AVX-NEXT: leal (%rax,%rcx,2), %eax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i32:
; XOP: # BB#0:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
; XOP-NEXT: retq
%b = call i32 @llvm.bitreverse.i32(i32 %a)
ret i32 %b
}

define i64 @test_bitreverse_i64(i64 %a) nounwind {
; SSE-LABEL: test_bitreverse_i64:
; SSE: # BB#0:
; SSE-NEXT: bswapq %rdi
; SSE-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
; SSE-NEXT: andq %rdi, %rax
; SSE-NEXT: shlq $4, %rax
; SSE-NEXT: movabsq $-1085102592571150096, %rcx # imm = 0xF0F0F0F0F0F0F0F0
; SSE-NEXT: andq %rdi, %rcx
; SSE-NEXT: shrq $4, %rcx
; SSE-NEXT: orq %rax, %rcx
; SSE-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
; SSE-NEXT: andq %rcx, %rax
; SSE-NEXT: movabsq $-3689348814741910324, %rdx # imm = 0xCCCCCCCCCCCCCCCC
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: shrq $2, %rdx
; SSE-NEXT: leaq (%rdx,%rax,4), %rax
; SSE-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; SSE-NEXT: andq %rax, %rcx
; SSE-NEXT: movabsq $-6148914691236517206, %rdx # imm = 0xAAAAAAAAAAAAAAAA
; SSE-NEXT: andq %rax, %rdx
; SSE-NEXT: shrq %rdx
; SSE-NEXT: leaq (%rdx,%rcx,2), %rax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i64:
; AVX: # BB#0:
; AVX-NEXT: bswapq %rdi
; AVX-NEXT: movabsq $1085102592571150095, %rax # imm = 0xF0F0F0F0F0F0F0F
; AVX-NEXT: andq %rdi, %rax
; AVX-NEXT: shlq $4, %rax
; AVX-NEXT: movabsq $-1085102592571150096, %rcx # imm = 0xF0F0F0F0F0F0F0F0
; AVX-NEXT: andq %rdi, %rcx
; AVX-NEXT: shrq $4, %rcx
; AVX-NEXT: orq %rax, %rcx
; AVX-NEXT: movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
; AVX-NEXT: andq %rcx, %rax
; AVX-NEXT: movabsq $-3689348814741910324, %rdx # imm = 0xCCCCCCCCCCCCCCCC
; AVX-NEXT: andq %rcx, %rdx
; AVX-NEXT: shrq $2, %rdx
; AVX-NEXT: leaq (%rdx,%rax,4), %rax
; AVX-NEXT: movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
; AVX-NEXT: andq %rax, %rcx
; AVX-NEXT: movabsq $-6148914691236517206, %rdx # imm = 0xAAAAAAAAAAAAAAAA
; AVX-NEXT: andq %rax, %rdx
; AVX-NEXT: shrq %rdx
; AVX-NEXT: leaq (%rdx,%rcx,2), %rax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i64:
; XOP: # BB#0:
; XOP-NEXT: vmovq %rdi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovq %xmm0, %rax
; XOP-NEXT: retq
%b = call i64 @llvm.bitreverse.i64(i64 %a)
ret i64 %b
}
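
; Vector bitreverse lowering (per the checks below): SSE2 first reverses the byte
; order within each element (for multi-byte elements) using shuffles, then reverses
; the bits of each byte with shift/mask/or sequences; SSSE3, AVX, AVX2 and AVX512
; use pshufb for the byte reversal (where needed) and for per-nibble lookup tables;
; XOP targets fold the whole per-lane operation into a single vpperm.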
define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: psllw $4, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: pshufb %xmm2, %xmm3
; SSSE3-NEXT: psrlw $4, %xmm0
; SSSE3-NEXT: pand %xmm1, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: pshufb %xmm0, %xmm1
; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v16i8:
; AVX: # BB#0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
ret <16 x i8> %b
}

define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i16:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: psllw $4, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i16:
; SSSE3: # BB#0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: pshufb %xmm2, %xmm3
; SSSE3-NEXT: psrlw $4, %xmm0
; SSSE3-NEXT: pand %xmm1, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: pshufb %xmm0, %xmm1
; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v8i16:
; AVX: # BB#0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a)
ret <8 x i16> %b
}

define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v4i32:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: psllw $4, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v4i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: pshufb %xmm2, %xmm3
; SSSE3-NEXT: psrlw $4, %xmm0
; SSSE3-NEXT: pand %xmm1, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: pshufb %xmm0, %xmm1
; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v4i32:
; AVX: # BB#0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
ret <4 x i32> %b
}

define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v2i64:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: psllw $4, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: psllw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v2i64:
; SSSE3: # BB#0:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: pshufb %xmm2, %xmm3
; SSSE3-NEXT: psrlw $4, %xmm0
; SSSE3-NEXT: pand %xmm1, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: pshufb %xmm0, %xmm1
; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: test_bitreverse_v2i64:
; AVX: # BB#0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: retq
%b = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
ret <2 x i64> %b
}

define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: psllw $4, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: pand %xmm5, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: psllw $2, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm9, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; SSE2-NEXT: pand %xmm10, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm4, %xmm7
; SSE2-NEXT: psrlw $1, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm7
; SSE2-NEXT: pand %xmm2, %xmm7
; SSE2-NEXT: psllw $4, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: por %xmm7, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: pand %xmm9, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: psrlw $1, %xmm4
; SSE2-NEXT: pand %xmm11, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v32i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm4, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: movdqa %xmm5, %xmm6
; SSSE3-NEXT: pshufb %xmm2, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm0
; SSSE3-NEXT: pand %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: movdqa %xmm2, %xmm3
; SSSE3-NEXT: pshufb %xmm0, %xmm3
; SSSE3-NEXT: por %xmm6, %xmm3
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pand %xmm4, %xmm0
; SSSE3-NEXT: pshufb %xmm0, %xmm5
; SSSE3-NEXT: psrlw $4, %xmm1
; SSSE3-NEXT: pand %xmm4, %xmm1
; SSSE3-NEXT: pshufb %xmm1, %xmm2
; SSSE3-NEXT: por %xmm5, %xmm2
; SSSE3-NEXT: movdqa %xmm3, %xmm0
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v32i8:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v32i8:
; AVX512: # BB#0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%b = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %a)
ret <32 x i8> %b
}

define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i16:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: psllw $4, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm9, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; SSE2-NEXT: pand %xmm10, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
; SSE2-NEXT: psrlw $1, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm12, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6]
; SSE2-NEXT: packuswb %xmm7, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: pand %xmm9, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm5
; SSE2-NEXT: psrlw $1, %xmm5
; SSE2-NEXT: pand %xmm11, %xmm5
; SSE2-NEXT: pand %xmm12, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i16:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm5, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: movdqa %xmm6, %xmm7
; SSSE3-NEXT: pshufb %xmm2, %xmm7
; SSSE3-NEXT: psrlw $4, %xmm0
; SSSE3-NEXT: pand %xmm5, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: movdqa %xmm2, %xmm3
; SSSE3-NEXT: pshufb %xmm0, %xmm3
; SSSE3-NEXT: por %xmm7, %xmm3
; SSSE3-NEXT: pshufb %xmm4, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pand %xmm5, %xmm0
; SSSE3-NEXT: pshufb %xmm0, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm1
; SSSE3-NEXT: pand %xmm5, %xmm1
; SSSE3-NEXT: pshufb %xmm1, %xmm2
; SSSE3-NEXT: por %xmm6, %xmm2
; SSSE3-NEXT: movdqa %xmm3, %xmm0
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm0, %xmm6, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v16i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v16i16:
; AVX512: # BB#0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
ret <16 x i16> %b
}

define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i32:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: psllw $4, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm9, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; SSE2-NEXT: pand %xmm10, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
; SSE2-NEXT: psrlw $1, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm12, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm7, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: pand %xmm9, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm5
; SSE2-NEXT: psrlw $1, %xmm5
; SSE2-NEXT: pand %xmm11, %xmm5
; SSE2-NEXT: pand %xmm12, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm5, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: movdqa %xmm6, %xmm7
; SSSE3-NEXT: pshufb %xmm2, %xmm7
; SSSE3-NEXT: psrlw $4, %xmm0
; SSSE3-NEXT: pand %xmm5, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: movdqa %xmm2, %xmm3
; SSSE3-NEXT: pshufb %xmm0, %xmm3
; SSSE3-NEXT: por %xmm7, %xmm3
; SSSE3-NEXT: pshufb %xmm4, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pand %xmm5, %xmm0
; SSSE3-NEXT: pshufb %xmm0, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm1
; SSSE3-NEXT: pand %xmm5, %xmm1
; SSSE3-NEXT: pshufb %xmm1, %xmm2
; SSSE3-NEXT: por %xmm6, %xmm2
; SSSE3-NEXT: movdqa %xmm3, %xmm0
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm0, %xmm6, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v8i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v8i32:
; AVX512: # BB#0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%b = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
ret <8 x i32> %b
}

define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v4i64:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm3
; SSE2-NEXT: psllw $4, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm9, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; SSE2-NEXT: pand %xmm10, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
; SSE2-NEXT: psrlw $1, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm12, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm7, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psllw $2, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: pand %xmm9, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm5
; SSE2-NEXT: psrlw $1, %xmm5
; SSE2-NEXT: pand %xmm11, %xmm5
; SSE2-NEXT: pand %xmm12, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v4i64:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm5, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: movdqa %xmm6, %xmm7
; SSSE3-NEXT: pshufb %xmm2, %xmm7
; SSSE3-NEXT: psrlw $4, %xmm0
; SSSE3-NEXT: pand %xmm5, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: movdqa %xmm2, %xmm3
; SSSE3-NEXT: pshufb %xmm0, %xmm3
; SSSE3-NEXT: por %xmm7, %xmm3
; SSSE3-NEXT: pshufb %xmm4, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pand %xmm5, %xmm0
; SSSE3-NEXT: pshufb %xmm0, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm1
; SSSE3-NEXT: pand %xmm5, %xmm1
; SSSE3-NEXT: pshufb %xmm1, %xmm2
; SSSE3-NEXT: por %xmm6, %xmm2
; SSSE3-NEXT: movdqa %xmm3, %xmm0
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm0, %xmm6, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v4i64:
; AVX2: # BB#0:
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_bitreverse_v4i64:
; AVX512: # BB#0:
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512-NEXT: vpshufb %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%b = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
ret <4 x i64> %b
}
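
; The 512-bit-wide tests below repeat the same per-lane strategies, split across
; multiple XMM/YMM registers.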
define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v64i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm13, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm7, %xmm5
; SSE2-NEXT: pand %xmm7, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm13, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: psllw $2, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm9, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; SSE2-NEXT: pand %xmm10, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: psrlw $1, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE2-NEXT: pand %xmm11, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm12, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: pand %xmm13, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: psllw $2, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: pand %xmm9, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: psrlw $1, %xmm4
; SSE2-NEXT: pand %xmm11, %xmm4
; SSE2-NEXT: pand %xmm12, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm2
; SSE2-NEXT: psrlw $4, %xmm2
; SSE2-NEXT: pand %xmm13, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: psllw $2, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: pand %xmm9, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm10, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: psrlw $1, %xmm4
; SSE2-NEXT: pand %xmm11, %xmm4
; SSE2-NEXT: pand %xmm12, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pand %xmm13, %xmm4
; SSE2-NEXT: psllw $4, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm4
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: psrlw $4, %xmm3
; SSE2-NEXT: pand %xmm13, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: pand %xmm9, %xmm3
; SSE2-NEXT: psrlw $2, %xmm3
; SSE2-NEXT: pand %xmm10, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: pand %xmm11, %xmm6
; SSE2-NEXT: pand %xmm12, %xmm3
; SSE2-NEXT: paddb %xmm3, %xmm3
; SSE2-NEXT: por %xmm6, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v64i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: pand %xmm8, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: movdqa %xmm9, %xmm6
; SSSE3-NEXT: pshufb %xmm0, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm5
; SSSE3-NEXT: pand %xmm8, %xmm5
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: movdqa %xmm4, %xmm0
; SSSE3-NEXT: pshufb %xmm5, %xmm0
; SSSE3-NEXT: por %xmm6, %xmm0
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: pand %xmm8, %xmm5
; SSSE3-NEXT: movdqa %xmm9, %xmm6
; SSSE3-NEXT: pshufb %xmm5, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm1
; SSSE3-NEXT: pand %xmm8, %xmm1
; SSSE3-NEXT: movdqa %xmm4, %xmm5
; SSSE3-NEXT: pshufb %xmm1, %xmm5
; SSSE3-NEXT: por %xmm6, %xmm5
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: pand %xmm8, %xmm1
; SSSE3-NEXT: movdqa %xmm9, %xmm7
; SSSE3-NEXT: pshufb %xmm1, %xmm7
; SSSE3-NEXT: psrlw $4, %xmm2
; SSSE3-NEXT: pand %xmm8, %xmm2
; SSSE3-NEXT: movdqa %xmm4, %xmm6
; SSSE3-NEXT: pshufb %xmm2, %xmm6
; SSSE3-NEXT: por %xmm7, %xmm6
; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: pand %xmm8, %xmm1
; SSSE3-NEXT: pshufb %xmm1, %xmm9
; SSSE3-NEXT: psrlw $4, %xmm3
; SSSE3-NEXT: pand %xmm8, %xmm3
; SSSE3-NEXT: pshufb %xmm3, %xmm4
; SSSE3-NEXT: por %xmm9, %xmm4
; SSSE3-NEXT: movdqa %xmm5, %xmm1
; SSSE3-NEXT: movdqa %xmm6, %xmm2
; SSSE3-NEXT: movdqa %xmm4, %xmm3
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v64i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm0, %xmm6, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v64i8:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %ymm0, %ymm5, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpshufb %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpshufb %ymm1, %ymm5, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v64i8:
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512F-NEXT: vpshufb %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512F-NEXT: vpshufb %ymm0, %ymm5, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm3, %ymm0
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm3
; AVX512F-NEXT: vpshufb %ymm3, %ymm4, %ymm3
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpshufb %ymm1, %ymm5, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v64i8:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512BW-NEXT: vpshufb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v64i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v64i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX2-NEXT: retq
%b = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
ret <64 x i8> %b
}
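; A reading of the checks above, for reference: each i16 element is bit-reversed by
; first swapping its two bytes (punpck + pshuflw/pshufhw on SSE2, a [1,0,3,2,...]
; pshufb on SSSE3+), then bit-reversing every byte with the same mask-and-shift or
; nibble-lookup sequence used for the v64i8 case above; XOP does both steps with a
; single bit-reversing vpperm.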
define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v32i16:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
; SSE2-NEXT: psllw $2, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm9, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm10, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; SSE2-NEXT: pand %xmm11, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm13, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6]
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: psllw $2, %xmm6
; SSE2-NEXT: pand %xmm9, %xmm6
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm11, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: pand %xmm13, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: psrlw $4, %xmm2
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: psllw $2, %xmm6
; SSE2-NEXT: pand %xmm9, %xmm6
; SSE2-NEXT: pand %xmm10, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm11, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: pand %xmm13, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,6]
; SSE2-NEXT: packuswb %xmm6, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: psrlw $4, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: por %xmm6, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pand %xmm10, %xmm3
; SSE2-NEXT: psrlw $2, %xmm3
; SSE2-NEXT: pand %xmm11, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm7
; SSE2-NEXT: psrlw $1, %xmm7
; SSE2-NEXT: pand %xmm12, %xmm7
; SSE2-NEXT: pand %xmm13, %xmm3
; SSE2-NEXT: paddb %xmm3, %xmm3
; SSE2-NEXT: por %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v32i16:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; SSSE3-NEXT: pshufb %xmm8, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pand %xmm9, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm7 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm0, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm1
; SSSE3-NEXT: pand %xmm9, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: movdqa %xmm4, %xmm0
; SSSE3-NEXT: pshufb %xmm1, %xmm0
; SSSE3-NEXT: por %xmm6, %xmm0
; SSSE3-NEXT: pshufb %xmm8, %xmm5
; SSSE3-NEXT: movdqa %xmm5, %xmm1
; SSSE3-NEXT: pand %xmm9, %xmm1
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm1, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm5
; SSSE3-NEXT: pand %xmm9, %xmm5
; SSSE3-NEXT: movdqa %xmm4, %xmm1
; SSSE3-NEXT: pshufb %xmm5, %xmm1
; SSSE3-NEXT: por %xmm6, %xmm1
; SSSE3-NEXT: pshufb %xmm8, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm5
; SSSE3-NEXT: pand %xmm9, %xmm5
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm5, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm2
; SSSE3-NEXT: pand %xmm9, %xmm2
; SSSE3-NEXT: movdqa %xmm4, %xmm5
; SSSE3-NEXT: pshufb %xmm2, %xmm5
; SSSE3-NEXT: por %xmm6, %xmm5
; SSSE3-NEXT: pshufb %xmm8, %xmm3
; SSSE3-NEXT: movdqa %xmm3, %xmm2
; SSSE3-NEXT: pand %xmm9, %xmm2
; SSSE3-NEXT: pshufb %xmm2, %xmm7
; SSSE3-NEXT: psrlw $4, %xmm3
; SSSE3-NEXT: pand %xmm9, %xmm3
; SSSE3-NEXT: pshufb %xmm3, %xmm4
; SSSE3-NEXT: por %xmm7, %xmm4
; SSSE3-NEXT: movdqa %xmm5, %xmm2
; SSSE3-NEXT: movdqa %xmm4, %xmm3
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v32i16:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm0, %xmm7, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm7, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v32i16:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %ymm0, %ymm6, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
; AVX2-NEXT: vpshufb %ymm2, %ymm5, %ymm2
; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpshufb %ymm1, %ymm6, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v32i16:
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
; AVX512F-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512F-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512F-NEXT: vpshufb %ymm0, %ymm6, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX512F-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm2
; AVX512F-NEXT: vpshufb %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpshufb %ymm1, %ymm6, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v32i16:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30,33,32,35,34,37,36,39,38,41,40,43,42,45,44,47,46,49,48,51,50,53,52,55,54,57,56,59,58,61,60,63,62]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512BW-NEXT: vpshufb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v32i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v32i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX2-NEXT: retq
%b = call <32 x i16> @llvm.bitreverse.v32i16(<32 x i16> %a)
ret <32 x i16> %b
}
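; Same pattern for i32 elements: reverse the four bytes within each element with a
; [3,2,1,0,...] shuffle, then bit-reverse every byte. Plain AVX512F (no AVX512BW)
; lacks a 512-bit byte shuffle, so it expands to dword shift/mask sequences for both
; the byte swap and the 4/2/1-bit swaps.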
define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v16i32:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
; SSE2-NEXT: psllw $2, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm9, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm10, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; SSE2-NEXT: pand %xmm11, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm13, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: psllw $2, %xmm6
; SSE2-NEXT: pand %xmm9, %xmm6
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm11, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: pand %xmm13, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: psrlw $4, %xmm2
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: psllw $2, %xmm6
; SSE2-NEXT: pand %xmm9, %xmm6
; SSE2-NEXT: pand %xmm10, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm11, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: pand %xmm13, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm6, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: psrlw $4, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: por %xmm6, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pand %xmm10, %xmm3
; SSE2-NEXT: psrlw $2, %xmm3
; SSE2-NEXT: pand %xmm11, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm7
; SSE2-NEXT: psrlw $1, %xmm7
; SSE2-NEXT: pand %xmm12, %xmm7
; SSE2-NEXT: pand %xmm13, %xmm3
; SSE2-NEXT: paddb %xmm3, %xmm3
; SSE2-NEXT: por %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v16i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; SSSE3-NEXT: pshufb %xmm8, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pand %xmm9, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm7 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm0, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm1
; SSSE3-NEXT: pand %xmm9, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: movdqa %xmm4, %xmm0
; SSSE3-NEXT: pshufb %xmm1, %xmm0
; SSSE3-NEXT: por %xmm6, %xmm0
; SSSE3-NEXT: pshufb %xmm8, %xmm5
; SSSE3-NEXT: movdqa %xmm5, %xmm1
; SSSE3-NEXT: pand %xmm9, %xmm1
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm1, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm5
; SSSE3-NEXT: pand %xmm9, %xmm5
; SSSE3-NEXT: movdqa %xmm4, %xmm1
; SSSE3-NEXT: pshufb %xmm5, %xmm1
; SSSE3-NEXT: por %xmm6, %xmm1
; SSSE3-NEXT: pshufb %xmm8, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm5
; SSSE3-NEXT: pand %xmm9, %xmm5
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm5, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm2
; SSSE3-NEXT: pand %xmm9, %xmm2
; SSSE3-NEXT: movdqa %xmm4, %xmm5
; SSSE3-NEXT: pshufb %xmm2, %xmm5
; SSSE3-NEXT: por %xmm6, %xmm5
; SSSE3-NEXT: pshufb %xmm8, %xmm3
; SSSE3-NEXT: movdqa %xmm3, %xmm2
; SSSE3-NEXT: pand %xmm9, %xmm2
; SSSE3-NEXT: pshufb %xmm2, %xmm7
; SSSE3-NEXT: psrlw $4, %xmm3
; SSSE3-NEXT: pand %xmm9, %xmm3
; SSSE3-NEXT: pshufb %xmm3, %xmm4
; SSSE3-NEXT: por %xmm7, %xmm4
; SSSE3-NEXT: movdqa %xmm5, %xmm2
; SSSE3-NEXT: movdqa %xmm4, %xmm3
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v16i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm0, %xmm7, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm7, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v16i32:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %ymm0, %ymm6, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
; AVX2-NEXT: vpshufb %ymm2, %ymm5, %ymm2
; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpshufb %ymm1, %ymm6, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v16i32:
; AVX512F: # BB#0:
; AVX512F-NEXT: vpsrld $24, %zmm0, %zmm1
; AVX512F-NEXT: vpsrld $8, %zmm0, %zmm2
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm2, %zmm2
; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpslld $24, %zmm0, %zmm2
; AVX512F-NEXT: vpslld $8, %zmm0, %zmm0
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm1
; AVX512F-NEXT: vpslld $4, %zmm1, %zmm1
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: vpsrld $4, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm1
; AVX512F-NEXT: vpslld $2, %zmm1, %zmm1
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: vpsrld $2, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm1
; AVX512F-NEXT: vpslld $1, %zmm1, %zmm1
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: vpsrld $1, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v16i32:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28,35,34,33,32,39,38,37,36,43,42,41,40,47,46,45,44,51,50,49,48,55,54,53,52,59,58,57,56,63,62,61,60]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512BW-NEXT: vpshufb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v16i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v16i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX2-NEXT: retq
%b = call <16 x i32> @llvm.bitreverse.v16i32(<16 x i32> %a)
ret <16 x i32> %b
}
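; Same pattern for i64 elements: reverse the eight bytes within each element with a
; [7,6,5,4,3,2,1,0,...] shuffle, then bit-reverse every byte; plain AVX512F again
; falls back to qword shift/mask sequences instead of a 512-bit byte shuffle.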
define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; SSE2-LABEL: test_bitreverse_v8i64:
; SSE2: # BB#0:
; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: pand %xmm5, %xmm7
; SSE2-NEXT: psllw $2, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; SSE2-NEXT: pand %xmm9, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204]
; SSE2-NEXT: pand %xmm10, %xmm0
; SSE2-NEXT: psrlw $2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; SSE2-NEXT: pand %xmm11, %xmm0
; SSE2-NEXT: por %xmm7, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE2-NEXT: pand %xmm13, %xmm0
; SSE2-NEXT: paddb %xmm0, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: psrlw $4, %xmm1
; SSE2-NEXT: pand %xmm8, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: psllw $2, %xmm6
; SSE2-NEXT: pand %xmm9, %xmm6
; SSE2-NEXT: pand %xmm10, %xmm1
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand %xmm11, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: pand %xmm13, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: psrlw $4, %xmm2
; SSE2-NEXT: pand %xmm8, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: psllw $2, %xmm6
; SSE2-NEXT: pand %xmm9, %xmm6
; SSE2-NEXT: pand %xmm10, %xmm2
; SSE2-NEXT: psrlw $2, %xmm2
; SSE2-NEXT: pand %xmm11, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pand %xmm7, %xmm6
; SSE2-NEXT: psrlw $1, %xmm6
; SSE2-NEXT: pand %xmm12, %xmm6
; SSE2-NEXT: pand %xmm13, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm2
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4]
; SSE2-NEXT: packuswb %xmm6, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pand %xmm8, %xmm6
; SSE2-NEXT: psllw $4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: psrlw $4, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: por %xmm6, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: psllw $2, %xmm5
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pand %xmm10, %xmm3
; SSE2-NEXT: psrlw $2, %xmm3
; SSE2-NEXT: pand %xmm11, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: pand %xmm3, %xmm7
; SSE2-NEXT: psrlw $1, %xmm7
; SSE2-NEXT: pand %xmm12, %xmm7
; SSE2-NEXT: pand %xmm13, %xmm3
; SSE2-NEXT: paddb %xmm3, %xmm3
; SSE2-NEXT: por %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test_bitreverse_v8i64:
; SSSE3: # BB#0:
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; SSSE3-NEXT: pshufb %xmm8, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: pand %xmm9, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm7 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm0, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm1
; SSSE3-NEXT: pand %xmm9, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; SSSE3-NEXT: movdqa %xmm4, %xmm0
; SSSE3-NEXT: pshufb %xmm1, %xmm0
; SSSE3-NEXT: por %xmm6, %xmm0
; SSSE3-NEXT: pshufb %xmm8, %xmm5
; SSSE3-NEXT: movdqa %xmm5, %xmm1
; SSSE3-NEXT: pand %xmm9, %xmm1
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm1, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm5
; SSSE3-NEXT: pand %xmm9, %xmm5
; SSSE3-NEXT: movdqa %xmm4, %xmm1
; SSSE3-NEXT: pshufb %xmm5, %xmm1
; SSSE3-NEXT: por %xmm6, %xmm1
; SSSE3-NEXT: pshufb %xmm8, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm5
; SSSE3-NEXT: pand %xmm9, %xmm5
; SSSE3-NEXT: movdqa %xmm7, %xmm6
; SSSE3-NEXT: pshufb %xmm5, %xmm6
; SSSE3-NEXT: psrlw $4, %xmm2
; SSSE3-NEXT: pand %xmm9, %xmm2
; SSSE3-NEXT: movdqa %xmm4, %xmm5
; SSSE3-NEXT: pshufb %xmm2, %xmm5
; SSSE3-NEXT: por %xmm6, %xmm5
; SSSE3-NEXT: pshufb %xmm8, %xmm3
; SSSE3-NEXT: movdqa %xmm3, %xmm2
; SSSE3-NEXT: pand %xmm9, %xmm2
; SSSE3-NEXT: pshufb %xmm2, %xmm7
; SSSE3-NEXT: psrlw $4, %xmm3
; SSSE3-NEXT: pand %xmm9, %xmm3
; SSSE3-NEXT: pshufb %xmm3, %xmm4
; SSSE3-NEXT: por %xmm7, %xmm4
; SSSE3-NEXT: movdqa %xmm5, %xmm2
; SSSE3-NEXT: movdqa %xmm4, %xmm3
; SSSE3-NEXT: retq
;
; AVX1-LABEL: test_bitreverse_v8i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm0, %xmm7, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm7, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_bitreverse_v8i64:
; AVX2: # BB#0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %ymm0, %ymm6, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
; AVX2-NEXT: vpshufb %ymm2, %ymm5, %ymm2
; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpshufb %ymm1, %ymm6, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_bitreverse_v8i64:
; AVX512F: # BB#0:
; AVX512F-NEXT: vpsrlq $56, %zmm0, %zmm1
; AVX512F-NEXT: vpsrlq $40, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpsrlq $24, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
; AVX512F-NEXT: vpsrlq $8, %zmm0, %zmm3
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm3, %zmm3
; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vporq %zmm1, %zmm3, %zmm1
; AVX512F-NEXT: vpsllq $8, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
; AVX512F-NEXT: vpsllq $24, %zmm0, %zmm3
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm3, %zmm3
; AVX512F-NEXT: vporq %zmm2, %zmm3, %zmm2
; AVX512F-NEXT: vpsllq $56, %zmm0, %zmm3
; AVX512F-NEXT: vpsllq $40, %zmm0, %zmm0
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm0, %zmm3, %zmm0
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm1
; AVX512F-NEXT: vpsllq $4, %zmm1, %zmm1
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: vpsrlq $4, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm1
; AVX512F-NEXT: vpsllq $2, %zmm1, %zmm1
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: vpsrlq $2, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm1
; AVX512F-NEXT: vpsllq $1, %zmm1, %zmm1
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: vpsrlq $1, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_bitreverse_v8i64:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24,39,38,37,36,35,34,33,32,47,46,45,44,43,42,41,40,55,54,53,52,51,50,49,48,63,62,61,60,59,58,57,56]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512BW-NEXT: vpshufb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm2, %zmm0
; AVX512BW-NEXT: retq
;
; XOPAVX1-LABEL: test_bitreverse_v8i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm0, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX1-NEXT: vpperm %xmm3, %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: test_bitreverse_v8i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm0, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm2, %xmm0, %xmm2
; XOPAVX2-NEXT: vpperm %xmm3, %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; XOPAVX2-NEXT: retq
%b = call <8 x i64> @llvm.bitreverse.v8i64(<8 x i64> %a)
ret <8 x i64> %b
}
;
; Constant Folding
;
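; Calls with constant operands should fold away at compile time, leaving only an
; immediate or a constant-pool load (e.g. bitreverse(0xFF00FF00) == 0x00FF00FF).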
define i32 @fold_bitreverse_i32() nounwind {
; ALL-LABEL: fold_bitreverse_i32:
; ALL: # BB#0:
; ALL-NEXT: movl $16711935, %eax # imm = 0xFF00FF
; ALL-NEXT: retq
%b = call i32 @llvm.bitreverse.i32(i32 4278255360)
ret i32 %b
}
define <16 x i8> @fold_bitreverse_v16i8() nounwind {
; SSE-LABEL: fold_bitreverse_v16i8:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; SSE-NEXT: retq
;
; AVX-LABEL: fold_bitreverse_v16i8:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; AVX-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
; XOP-NEXT: retq
%b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> <i8 0, i8 -1, i8 2, i8 -3, i8 4, i8 -5, i8 6, i8 -7, i8 8, i8 -9, i8 10, i8 -11, i8 12, i8 -13, i8 14, i8 -15>)
ret <16 x i8> %b
}
define <16 x i16> @fold_bitreverse_v16i16() nounwind {
; SSE-LABEL: fold_bitreverse_v16i16:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,65535,16384,49151,8192,57343,24576,40959]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [4096,61439,20480,45055,12288,53247,28672,36863]
; SSE-NEXT: retq
;
; AVX-LABEL: fold_bitreverse_v16i16:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
; AVX-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i16:
; XOP: # BB#0:
; XOP-NEXT: vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
; XOP-NEXT: retq
%b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> <i16 0, i16 -1, i16 2, i16 -3, i16 4, i16 -5, i16 6, i16 -7, i16 8, i16 -9, i16 10, i16 -11, i16 12, i16 -13, i16 14, i16 -15>)
ret <16 x i16> %b
}
define <16 x i32> @fold_bitreverse_v16i32() nounwind {
; SSE-LABEL: fold_bitreverse_v16i32:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,1073741824,3221225471]
; SSE-NEXT: movaps {{.*#+}} xmm1 = [536870912,3758096383,1610612736,2684354559]
; SSE-NEXT: movaps {{.*#+}} xmm2 = [268435456,4026531839,1342177280,2952790015]
; SSE-NEXT: movaps {{.*#+}} xmm3 = [805306368,3489660927,1879048192,2415919103]
; SSE-NEXT: retq
;
; AVX1-LABEL: fold_bitreverse_v16i32:
; AVX1: # BB#0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX1-NEXT: retq
;
; AVX2-LABEL: fold_bitreverse_v16i32:
; AVX2: # BB#0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; AVX2-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX2-NEXT: retq
;
; AVX512-LABEL: fold_bitreverse_v16i32:
; AVX512: # BB#0:
; AVX512-NEXT: vmovaps {{.*#+}} zmm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559,268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; AVX512-NEXT: retq
;
; XOP-LABEL: fold_bitreverse_v16i32:
; XOP: # BB#0:
; XOP-NEXT: vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
; XOP-NEXT: vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
; XOP-NEXT: retq
%b = call <16 x i32> @llvm.bitreverse.v16i32(<16 x i32> <i32 0, i32 -1, i32 2, i32 -3, i32 4, i32 -5, i32 6, i32 -7, i32 8, i32 -9, i32 10, i32 -11, i32 12, i32 -13, i32 14, i32 -15>)
ret <16 x i32> %b
}
declare i8 @llvm.bitreverse.i8(i8) readnone
declare i16 @llvm.bitreverse.i16(i16) readnone
declare i32 @llvm.bitreverse.i32(i32) readnone
declare i64 @llvm.bitreverse.i64(i64) readnone
declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>) readnone
declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>) readnone
declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) readnone
declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) readnone
declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>) readnone
declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>) readnone
declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>) readnone
declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) readnone
declare <64 x i8> @llvm.bitreverse.v64i8(<64 x i8>) readnone
declare <32 x i16> @llvm.bitreverse.v32i16(<32 x i16>) readnone
declare <16 x i32> @llvm.bitreverse.v16i32(<16 x i32>) readnone
declare <8 x i64> @llvm.bitreverse.v8i64(<8 x i64>) readnone