@@ -2588,206 +2588,166 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX2-NEXT: pushq %r13
; AVX2-NEXT: pushq %r12
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $16, %rsp
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-NEXT: vpextrq $1, %xmm4, %rbx
; AVX2-NEXT: vmovq %xmm4, %rbp
; AVX2-NEXT: vpextrq $1, %xmm3, %rdi
; AVX2-NEXT: vmovq %xmm3, %rcx
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpextrq $1, %xmm3, %rdx
; AVX2-NEXT: vmovq %xmm3, %r9
; AVX2-NEXT: vpextrq $1, %xmm2, %r13
; AVX2-NEXT: vmovq %xmm2, %r12
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpextrq $1, %xmm3, %r14
; AVX2-NEXT: vmovq %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: vpextrq $1, %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vpextrq $1, %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: vmovq %xmm1, %r10
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm9
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
; AVX2-NEXT: vmovq %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-NEXT: vpextrq $1, %xmm4, %rax
; AVX2-NEXT: addq %rbx, %rax
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: vmovq %xmm4, %rsi
; AVX2-NEXT: addq %rbp, %rsi
; AVX2-NEXT: vpextrq $1, %xmm3, %rax
; AVX2-NEXT: addq %rdi, %rax
; AVX2-NEXT: movq %rax, %rdi
; AVX2-NEXT: vmovq %xmm3, %r11
; AVX2-NEXT: addq %rcx, %r11
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX2-NEXT: vmovq %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX2-NEXT: vmovq %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
; AVX2-NEXT: addq %rdx, %rcx
; AVX2-NEXT: vmovq %xmm3, %r8
; AVX2-NEXT: addq %r9, %r8
; AVX2-NEXT: vpextrq $1, %xmm2, %r9
; AVX2-NEXT: addq %r13, %r9
; AVX2-NEXT: vmovq %xmm2, %r15
; AVX2-NEXT: addq %r12, %r15
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vmovq %xmm2, %r11
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
; AVX2-NEXT: vmovq %xmm0, %r14
; AVX2-NEXT: vpextrq $1, %xmm0, %rbx
; AVX2-NEXT: vpextrq $1, %xmm2, %rsi
; AVX2-NEXT: vpextrq $1, %xmm7, %r12
; AVX2-NEXT: vpextrq $1, %xmm6, %r15
; AVX2-NEXT: vpextrq $1, %xmm5, %rdx
; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
; AVX2-NEXT: vpextrq $1, %xmm3, %rax
; AVX2-NEXT: addq %r14, %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vmovq %xmm3, %rax
; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vmovq %xmm2, %rax
; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpextrq $1, %xmm0, %rbp
; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
; AVX2-NEXT: vmovq %xmm0, %r12
; AVX2-NEXT: addq %r10, %r12
; AVX2-NEXT: vpextrq $1, %xmm1, %rax
; AVX2-NEXT: vmovq %xmm3, %rbp
; AVX2-NEXT: vpextrq $1, %xmm9, %r9
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm0, %r10
; AVX2-NEXT: addq %rax, %r10
; AVX2-NEXT: vmovq %xmm1, %rax
; AVX2-NEXT: vmovq %xmm0, %rdx
; AVX2-NEXT: addq %rax, %rdx
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm5
; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm7
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm8 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm0
; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
; AVX2-NEXT: addq %rbx, %rdi
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vpextrq $1, %xmm8, %r10
; AVX2-NEXT: addq %rsi, %r10
; AVX2-NEXT: vpextrq $1, %xmm7, %rsi
; AVX2-NEXT: addq %r12, %rsi
; AVX2-NEXT: movq %rsi, %r12
; AVX2-NEXT: vpextrq $1, %xmm4, %r13
; AVX2-NEXT: addq %r15, %r13
; AVX2-NEXT: vpextrq $1, %xmm5, %r15
; AVX2-NEXT: addq %rdx, %r15
; AVX2-NEXT: vpextrq $1, %xmm3, %r8
; AVX2-NEXT: addq %rcx, %r8
; AVX2-NEXT: vpextrq $1, %xmm6, %rsi
; AVX2-NEXT: addq %rax, %rsi
; AVX2-NEXT: vmovq %xmm6, %rdx
; AVX2-NEXT: addq %rbp, %rdx
; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
; AVX2-NEXT: addq %r9, %rcx
; AVX2-NEXT: vmovq %xmm0, %rdi
; AVX2-NEXT: leaq -1(%r14,%rdi), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vmovq %xmm8, %rdi
; AVX2-NEXT: leaq -1(%r11,%rdi), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vmovq %xmm7, %rdi
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: leaq -1(%rax,%rdi), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vmovq %xmm4, %rdi
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: leaq -1(%rax,%rdi), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vmovq %xmm5, %rdi
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: leaq -1(%rax,%rdi), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vmovq %xmm1, %rdi
; AVX2-NEXT: vmovq %xmm3, %rbp
; AVX2-NEXT: leaq -1(%rdi,%rbp), %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: vmovq %xmm9, %rdi
; AVX2-NEXT: vmovq %xmm2, %rbp
; AVX2-NEXT: leaq -1(%rdi,%rbp), %rdi
; AVX2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: addq $-1, %rbx
; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: adcq $-1, %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: addq $-1, %rsi
; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: adcq $-1, %rax
; AVX2-NEXT: movq %rax, (%rsp) # 8-byte Spill
; AVX2-NEXT: addq $-1, %rdi
; AVX2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: adcq $-1, %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: addq $-1, %r11
; AVX2-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: adcq $-1, %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: addq $-1, %rcx
; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: adcq $-1, %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: addq $-1, %r8
; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: adcq $-1, %rax
; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: addq $-1, %r9
; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: adcq $-1, %rax
; AVX2-NEXT: movq %rax, %rsi
; AVX2-NEXT: addq $-1, %r15
; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %r15d
; AVX2-NEXT: adcq $-1, %r15
; AVX2-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: movl $0, %r13d
; AVX2-NEXT: adcq $-1, %r13
; AVX2-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: movl $0, %r14d
; AVX2-NEXT: adcq $-1, %r14
; AVX2-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX2-NEXT: movl $0, %ebx
; AVX2-NEXT: adcq $-1, %rbx
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: addq $-1, %rax
; AVX2-NEXT: movl $0, %r11d
; AVX2-NEXT: adcq $-1, %r11
; AVX2-NEXT: addq $-1, %rbp
; AVX2-NEXT: movl $0, %r9d
; AVX2-NEXT: adcq $-1, %r9
; AVX2-NEXT: addq $-1, %r12
; AVX2-NEXT: movl $0, %r8d
; AVX2-NEXT: adcq $-1, %r8
; AVX2-NEXT: addq $-1, %r10
; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %edi
; AVX2-NEXT: adcq $-1, %rdi
; AVX2-NEXT: addq $-1, %r12
; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movl $0, %r11d
; AVX2-NEXT: adcq $-1, %r11
; AVX2-NEXT: addq $-1, %r13
; AVX2-NEXT: movl $0, %r10d
; AVX2-NEXT: adcq $-1, %r10
; AVX2-NEXT: addq $-1, %r15
; AVX2-NEXT: movl $0, %r14d
; AVX2-NEXT: adcq $-1, %r14
; AVX2-NEXT: addq $-1, %r8
; AVX2-NEXT: movl $0, %ebp
; AVX2-NEXT: adcq $-1, %rbp
; AVX2-NEXT: addq $-1, %rsi
; AVX2-NEXT: movl $0, %r12d
; AVX2-NEXT: adcq $-1, %r12
; AVX2-NEXT: addq $-1, %rdx
; AVX2-NEXT: movl $0, %ecx
; AVX2-NEXT: adcq $-1, %rcx
; AVX2-NEXT: shldq $63, %rdx, %rcx
; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: shldq $63, %r10, %rdi
; AVX2-NEXT: shldq $63, %r12, %r8
; AVX2-NEXT: shldq $63, %rbp, %r9
; AVX2-NEXT: shldq $63, %rax, %r11
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX2-NEXT: movl $0, %ebx
; AVX2-NEXT: adcq $-1, %rbx
; AVX2-NEXT: addq $-1, %rcx
; AVX2-NEXT: movl $0, %eax
; AVX2-NEXT: adcq $-1, %rax
; AVX2-NEXT: shldq $63, %rcx, %rax
; AVX2-NEXT: shldq $63, %rdx, %rbx
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX2-NEXT: shldq $63, %rdx, %r14
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX2-NEXT: shldq $63, %rdx, %r13
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: shldq $63, %rax, %r15
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: shldq $63, %rax, %rsi
; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: shldq $63, %rax, %rsi
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: shldq $63, %rax, %r12
; AVX2-NEXT: shldq $63, %rsi, %r12
; AVX2-NEXT: shldq $63, %r8, %rbp
; AVX2-NEXT: shldq $63, %r15, %r14
; AVX2-NEXT: shldq $63, %r13, %r10
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: shldq $63, %rax, %rcx
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: shldq $63, %rax, %r10
; AVX2-NEXT: movq (%rsp), %rax # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX2-NEXT: shldq $63, %rdx, %rax
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; AVX2-NEXT: shldq $63, %rdx, %rbp
; AVX2-NEXT: vmovq %rbp, %xmm8
; AVX2-NEXT: vmovq %rax, %xmm9
; AVX2-NEXT: vmovq %r10, %xmm0
; AVX2-NEXT: shldq $63, %rcx, %r11
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: shldq $63, %rcx, %rdi
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: shldq $63, %rcx, %r9
; AVX2-NEXT: vmovq %r9, %xmm8
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: vmovq %rcx, %xmm9
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vmovq %r12, %xmm12
; AVX2-NEXT: vmovq %rsi, %xmm13
; AVX2-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 8-byte Folded Reload
; AVX2-NEXT: # xmm14 = mem[0],zero
; AVX2-NEXT: vmovq %r15, %xmm15
; AVX2-NEXT: vmovq %r13, %xmm10
; AVX2-NEXT: vmovq %r14, %xmm11
; AVX2-NEXT: vmovq %rbx, %xmm2
; AVX2-NEXT: vmovq %r11, %xmm3
; AVX2-NEXT: vmovq %r9, %xmm4
; AVX2-NEXT: vmovq %r8, %xmm5
; AVX2-NEXT: vmovq %rdi, %xmm6
; AVX2-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 8-byte Folded Reload
; AVX2-NEXT: # xmm7 = mem[0],zero
; AVX2-NEXT: vmovq %r11, %xmm12
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: vmovq %rcx, %xmm13
; AVX2-NEXT: vmovq %r10, %xmm14
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: vmovq %rcx, %xmm15
; AVX2-NEXT: vmovq %r14, %xmm10
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: vmovq %rcx, %xmm11
; AVX2-NEXT: vmovq %rbp, %xmm2
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: vmovq %rcx, %xmm3
; AVX2-NEXT: vmovq %r12, %xmm4
; AVX2-NEXT: vmovq %rbx, %xmm5
; AVX2-NEXT: vmovq %rax, %xmm6
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX2-NEXT: shrq %rax
; AVX2-NEXT: vmovq %rax, %xmm7
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX2-NEXT: vpslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1]
@@ -2800,8 +2760,8 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm8[3]
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; AVX2-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7]
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
@@ -2811,7 +2771,6 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
; AVX2-NEXT: addq $16, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r12
; AVX2-NEXT: popq %r13