; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (srem undef, x) -> 0
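; Hedged note: with an undef numerator the combiner can fold the whole
; remainder away, so both runs below are expected to emit nothing but a
; return.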
define <4 x i32> @combine_vec_srem_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_undef0:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_undef0:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = srem <4 x i32> undef, %x
  ret <4 x i32> %1
}

; fold (srem x, undef) -> undef
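; Hedged note: an undef divisor likewise lets the remainder fold away, so no
; division code should survive in either run.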
define <4 x i32> @combine_vec_srem_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_undef1:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_undef1:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = srem <4 x i32> %x, undef
  ret <4 x i32> %1
}

; fold (srem x, y) -> (urem x, y) iff x and y are positive
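; Hedged reading: the 255 mask makes every lane of both operands provably
; non-negative, so the signed remainder can be treated as unsigned, and
; urem by the power of two 4 is x & 3. Folded into the existing mask
; (255 & 3 = 3 per lane), the sequence should collapse to a single vector
; AND against a constant pool value, presumably <i32 3, i32 3, i32 3, i32 3>.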
define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pos0:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_by_pos0:
; AVX:       # BB#0:
; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = srem <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}
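
; fold (srem x, y) -> (urem x, y) with non-uniform positive divisors
; Hedged reading of the checks below: the divisors <1, 4, 8, 16> are all
; powers of two, so each lane should reduce to x & (divisor - 1), i.e. masks
; 0, 3, 7 and 15. Lane 0 (divisor 1) is always 0, which is presumably why no
; extract/and is emitted for element 0: the pshufd of the zero-extending movd
; supplies that zero lane for free, while the other lanes are extracted,
; masked with andl, and repacked.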
define <4 x i32> @combine_vec_srem_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pos1:
; SSE:       # BB#0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    pextrd $3, %xmm0, %eax
; SSE-NEXT:    andl $15, %eax
; SSE-NEXT:    movd %eax, %xmm1
; SSE-NEXT:    pextrd $2, %xmm0, %eax
; SSE-NEXT:    andl $7, %eax
; SSE-NEXT:    movd %eax, %xmm2
; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT:    pextrd $1, %xmm0, %eax
; SSE-NEXT:    andl $3, %eax
; SSE-NEXT:    movd %eax, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_by_pos1:
; AVX:       # BB#0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpextrd $3, %xmm0, %eax
; AVX-NEXT:    andl $15, %eax
; AVX-NEXT:    vmovd %eax, %xmm1
; AVX-NEXT:    vpextrd $2, %xmm0, %eax
; AVX-NEXT:    andl $7, %eax
; AVX-NEXT:    vmovd %eax, %xmm2
; AVX-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT:    vpextrd $1, %xmm0, %eax
; AVX-NEXT:    andl $3, %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = srem <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %2
}