; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X32
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X64

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx2-builtins.c

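; Absolute value. Each test bitcasts the generic <4 x i64> argument to the
; element type of the corresponding _mm256_abs_* intrinsic, calls the
; llvm.x86.avx2.pabs.* intrinsic and expects a single vpabsb/vpabsw/vpabsd.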
define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a0) {
; X32-LABEL: test_mm256_abs_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpabsb %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_abs_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpabsb %ymm0, %ymm0
; X64-NEXT:    retq
  %arg = bitcast <4 x i64> %a0 to <32 x i8>
  %call = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %arg)
  %res = bitcast <32 x i8> %call to <4 x i64>
  ret <4 x i64> %res
}
declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone

define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a0) {
; X32-LABEL: test_mm256_abs_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpabsw %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_abs_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpabsw %ymm0, %ymm0
; X64-NEXT:    retq
  %arg = bitcast <4 x i64> %a0 to <16 x i16>
  %call = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %arg)
  %res = bitcast <16 x i16> %call to <4 x i64>
  ret <4 x i64> %res
}
declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone

define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a0) {
; X32-LABEL: test_mm256_abs_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpabsd %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_abs_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpabsd %ymm0, %ymm0
; X64-NEXT:    retq
  %arg = bitcast <4 x i64> %a0 to <8 x i32>
  %call = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %arg)
  %res = bitcast <8 x i32> %call to <4 x i64>
  ret <4 x i64> %res
}
declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone

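; Wrapping adds. Unlike the abs tests above, these are expressed with the
; generic IR 'add' on the bitcast element type (no target intrinsic needed)
; and should still select vpaddb/vpaddw/vpaddd/vpaddq.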
define <4 x i64> @test_mm256_add_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_add_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_add_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %res = add <32 x i8> %arg0, %arg1
  %bc = bitcast <32 x i8> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_add_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_add_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_add_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %res = add <16 x i16> %arg0, %arg1
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_add_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_add_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_add_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
  %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
  %res = add <8 x i32> %arg0, %arg1
  %bc = bitcast <8 x i32> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_add_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_add_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_add_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %res = add <4 x i64> %a0, %a1
  ret <4 x i64> %res
}

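; Saturating adds. Byte/word saturation has no generic IR form here, so these
; tests go through the llvm.x86.avx2.padds.* (signed) and
; llvm.x86.avx2.paddus.* (unsigned) intrinsics and expect
; vpaddsb/vpaddsw/vpaddusb/vpaddusw.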
define <4 x i64> @test_mm256_adds_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_adds_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_adds_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %res = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %arg0, <32 x i8> %arg1)
  %bc = bitcast <32 x i8> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone

define <4 x i64> @test_mm256_adds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_adds_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_adds_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %res = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %arg0, <16 x i16> %arg1)
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone

define <4 x i64> @test_mm256_adds_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_adds_epu8:
; X32:       # BB#0:
; X32-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_adds_epu8:
; X64:       # BB#0:
; X64-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %res = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %arg0, <32 x i8> %arg1)
  %bc = bitcast <32 x i8> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnone

define <4 x i64> @test_mm256_adds_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_adds_epu16:
; X32:       # BB#0:
; X32-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_adds_epu16:
; X64:       # BB#0:
; X64-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %res = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %arg0, <16 x i16> %arg1)
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind readnone

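; Byte alignment shifts. _mm256_alignr_epi8 concatenates and shifts each
; 128-bit lane independently, modeled here as a single 32-element
; shufflevector that should select one vpalignr.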
define <4 x i64> @test_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_alignr_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1],ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17]
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_alignr_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1],ymm0[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17]
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %shuf = shufflevector <32 x i8> %arg0, <32 x i8> %arg1, <32 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49>
  %res = bitcast <32 x i8> %shuf to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test2_mm256_alignr_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test2_mm256_alignr_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0],ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16]
; X32-NEXT:    retl
;
; X64-LABEL: test2_mm256_alignr_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm1[0],ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm1[16]
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %shuf = shufflevector <32 x i8> %arg0, <32 x i8> %arg1, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
  %res = bitcast <32 x i8> %shuf to <4 x i64>
  ret <4 x i64> %res
}

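; Bitwise logic. test_mm256_and_si256 is a plain 'and'; the andnot test
; builds the operand inversion explicitly as an 'xor' with all-ones.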
define <4 x i64> @test_mm256_and_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_and_si256:
; X32:       # BB#0:
; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_and_si256:
; X64:       # BB#0:
; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %res = and <4 x i64> %a0, %a1
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_andnot_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_andnot_si256:
; X32:       # BB#0:
; X32-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT:    vpxor %ymm2, %ymm0, %ymm0
; X32-NEXT:    vpand %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_andnot_si256:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT:    vpxor %ymm2, %ymm0, %ymm0
; X64-NEXT:    vpand %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %not = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %res = and <4 x i64> %not, %a1
  ret <4 x i64> %res
}

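; Unsigned rounding averages, via the llvm.x86.avx2.pavg.b/w intrinsics.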
define <4 x i64> @test_mm256_avg_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_avg_epu8:
; X32:       # BB#0:
; X32-NEXT:    vpavgb %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_avg_epu8:
; X64:       # BB#0:
; X64-NEXT:    vpavgb %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %res = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %arg0, <32 x i8> %arg1)
  %bc = bitcast <32 x i8> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone

define <4 x i64> @test_mm256_avg_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_avg_epu16:
; X32:       # BB#0:
; X32-NEXT:    vpavgw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_avg_epu16:
; X64:       # BB#0:
; X64-NEXT:    vpavgw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %res = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> %arg0, <16 x i16> %arg1)
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readnone

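; Blends. The immediate-controlled blends are modeled as shufflevectors with
; constant masks (selecting vpblendw/vpblendd), while the variable byte blend
; goes through the llvm.x86.avx2.pblendvb intrinsic.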
define <4 x i64> @test_mm256_blend_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_blend_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_blend_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %shuf = shufflevector <16 x i16> %arg0, <16 x i16> %arg1, <16 x i32> <i32 0, i32 17, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 25, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %res = bitcast <16 x i16> %shuf to <4 x i64>
  ret <4 x i64> %res
}

define <2 x i64> @test_mm_blend_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_blend_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_blend_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %shuf = shufflevector <4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
  %res = bitcast <4 x i32> %shuf to <2 x i64>
  ret <2 x i64> %res
}

define <4 x i64> @test_mm256_blend_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_blend_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_blend_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
  %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
  %shuf = shufflevector <8 x i32> %arg0, <8 x i32> %arg1, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 13, i32 6, i32 7>
  %res = bitcast <8 x i32> %shuf to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_blendv_epi8(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2) {
; X32-LABEL: test_mm256_blendv_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_blendv_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %arg2 = bitcast <4 x i64> %a2 to <32 x i8>
  %call = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %arg0, <32 x i8> %arg1, <32 x i8> %arg2)
  %res = bitcast <32 x i8> %call to <4 x i64>
  ret <4 x i64> %res
}
declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounwind readnone

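; Broadcasts. Splats of the low element are modeled as zero-mask
; shufflevectors; depending on element type these select vpbroadcastb/w/q,
; vbroadcastss/sd, vmovddup, or vinsertf128/vbroadcastf128 for the 128-bit
; lane broadcasts.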
define <2 x i64> @test_mm_broadcastb_epi8(<2 x i64> %a0) {
; X32-LABEL: test_mm_broadcastb_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpbroadcastb %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_broadcastb_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpbroadcastb %xmm0, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %shuf = shufflevector <16 x i8> %arg0, <16 x i8> undef, <16 x i32> zeroinitializer
  %res = bitcast <16 x i8> %shuf to <2 x i64>
  ret <2 x i64> %res
}

define <4 x i64> @test_mm256_broadcastb_epi8(<4 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastb_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpbroadcastb %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_broadcastb_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpbroadcastb %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %shuf = shufflevector <32 x i8> %arg0, <32 x i8> undef, <32 x i32> zeroinitializer
  %res = bitcast <32 x i8> %shuf to <4 x i64>
  ret <4 x i64> %res
}

define <2 x i64> @test_mm_broadcastd_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm_broadcastd_epi32:
; X32:       # BB#0:
; X32-NEXT:    vbroadcastss %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_broadcastd_epi32:
; X64:       # BB#0:
; X64-NEXT:    vbroadcastss %xmm0, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %shuf = shufflevector <4 x i32> %arg0, <4 x i32> undef, <4 x i32> zeroinitializer
  %res = bitcast <4 x i32> %shuf to <2 x i64>
  ret <2 x i64> %res
}

define <4 x i64> @test_mm256_broadcastd_epi32(<4 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastd_epi32:
; X32:       # BB#0:
; X32-NEXT:    vbroadcastss %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_broadcastd_epi32:
; X64:       # BB#0:
; X64-NEXT:    vbroadcastss %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
  %shuf = shufflevector <8 x i32> %arg0, <8 x i32> undef, <8 x i32> zeroinitializer
  %res = bitcast <8 x i32> %shuf to <4 x i64>
  ret <4 x i64> %res
}

define <2 x i64> @test_mm_broadcastq_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm_broadcastq_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpbroadcastq %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_broadcastq_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpbroadcastq %xmm0, %xmm0
; X64-NEXT:    retq
  %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
  ret <2 x i64> %res
}

define <4 x i64> @test_mm256_broadcastq_epi64(<4 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastq_epi64:
; X32:       # BB#0:
; X32-NEXT:    vbroadcastsd %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_broadcastq_epi64:
; X64:       # BB#0:
; X64-NEXT:    vbroadcastsd %xmm0, %ymm0
; X64-NEXT:    retq
  %res = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> zeroinitializer
  ret <4 x i64> %res
}

define <2 x double> @test_mm_broadcastsd_pd(<2 x double> %a0) {
; X32-LABEL: test_mm_broadcastsd_pd:
; X32:       # BB#0:
; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_broadcastsd_pd:
; X64:       # BB#0:
; X64-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; X64-NEXT:    retq
  %res = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
  ret <2 x double> %res
}

define <4 x double> @test_mm256_broadcastsd_pd(<4 x double> %a0) {
; X32-LABEL: test_mm256_broadcastsd_pd:
; X32:       # BB#0:
; X32-NEXT:    vbroadcastsd %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_broadcastsd_pd:
; X64:       # BB#0:
; X64-NEXT:    vbroadcastsd %xmm0, %ymm0
; X64-NEXT:    retq
  %res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> zeroinitializer
  ret <4 x double> %res
}

define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastsi128_si256:
; X32:       # BB#0:
; X32-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_broadcastsi128_si256:
; X64:       # BB#0:
; X64-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT:    retq
  %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_broadcastsi128_si256_mem(<2 x i64>* %p0) {
; X32-LABEL: test_mm256_broadcastsi128_si256_mem:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_broadcastsi128_si256_mem:
; X64:       # BB#0:
; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
; X64-NEXT:    retq
  %a0 = load <2 x i64>, <2 x i64>* %p0
  %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x i64> %res
}

define <4 x float> @test_mm_broadcastss_ps(<4 x float> %a0) {
; X32-LABEL: test_mm_broadcastss_ps:
; X32:       # BB#0:
; X32-NEXT:    vbroadcastss %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_broadcastss_ps:
; X64:       # BB#0:
; X64-NEXT:    vbroadcastss %xmm0, %xmm0
; X64-NEXT:    retq
  %res = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> zeroinitializer
  ret <4 x float> %res
}

define <8 x float> @test_mm256_broadcastss_ps(<8 x float> %a0) {
; X32-LABEL: test_mm256_broadcastss_ps:
; X32:       # BB#0:
; X32-NEXT:    vbroadcastss %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_broadcastss_ps:
; X64:       # BB#0:
; X64-NEXT:    vbroadcastss %xmm0, %ymm0
; X64-NEXT:    retq
  %res = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> zeroinitializer
  ret <8 x float> %res
}

define <2 x i64> @test_mm_broadcastw_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm_broadcastw_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpbroadcastw %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_broadcastw_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpbroadcastw %xmm0, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %shuf = shufflevector <8 x i16> %arg0, <8 x i16> undef, <8 x i32> zeroinitializer
  %res = bitcast <8 x i16> %shuf to <2 x i64>
  ret <2 x i64> %res
}

define <4 x i64> @test_mm256_broadcastw_epi16(<4 x i64> %a0) {
; X32-LABEL: test_mm256_broadcastw_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpbroadcastw %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_broadcastw_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpbroadcastw %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %shuf = shufflevector <16 x i16> %arg0, <16 x i16> undef, <16 x i32> zeroinitializer
  %res = bitcast <16 x i16> %shuf to <4 x i64>
  ret <4 x i64> %res
}

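; Per-lane byte shifts. _mm256_bslli_epi128/_mm256_bsrli_epi128 shift each
; 128-bit lane by an immediate byte count, modeled as shuffles against a
; zero vector and selecting vpslldq/vpsrldq.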
define <4 x i64> @test_mm256_bslli_epi128(<4 x i64> %a0) {
; X32-LABEL: test_mm256_bslli_epi128:
; X32:       # BB#0:
; X32-NEXT:    vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28]
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_bslli_epi128:
; X64:       # BB#0:
; X64-NEXT:    vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28]
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %shuf = shufflevector <32 x i8> zeroinitializer, <32 x i8> %arg0, <32 x i32> <i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60>
  %res = bitcast <32 x i8> %shuf to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_bsrli_epi128(<4 x i64> %a0) {
; X32-LABEL: test_mm256_bsrli_epi128:
; X32:       # BB#0:
; X32-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_bsrli_epi128:
; X64:       # BB#0:
; X64-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %shuf = shufflevector <32 x i8> %arg0, <32 x i8> zeroinitializer, <32 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50>
  %res = bitcast <32 x i8> %shuf to <4 x i64>
  ret <4 x i64> %res
}

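; Integer compares. Each test performs an icmp on the bitcast element type and
; sign-extends the <N x i1> result back to the element width, selecting
; vpcmpeq*/vpcmpgt*.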
define <4 x i64> @test_mm256_cmpeq_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_cmpeq_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cmpeq_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %cmp = icmp eq <32 x i8> %arg0, %arg1
  %res = sext <32 x i1> %cmp to <32 x i8>
  %bc = bitcast <32 x i8> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_cmpeq_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_cmpeq_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cmpeq_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %cmp = icmp eq <16 x i16> %arg0, %arg1
  %res = sext <16 x i1> %cmp to <16 x i16>
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_cmpeq_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_cmpeq_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cmpeq_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
  %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
  %cmp = icmp eq <8 x i32> %arg0, %arg1
  %res = sext <8 x i1> %cmp to <8 x i32>
  %bc = bitcast <8 x i32> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_cmpeq_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_cmpeq_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cmpeq_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %cmp = icmp eq <4 x i64> %a0, %a1
  %res = sext <4 x i1> %cmp to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_cmpgt_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_cmpgt_epi8:
; X32:       # BB#0:
; X32-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cmpgt_epi8:
; X64:       # BB#0:
; X64-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
  %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
  %cmp = icmp sgt <32 x i8> %arg0, %arg1
  %res = sext <32 x i1> %cmp to <32 x i8>
  %bc = bitcast <32 x i8> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_cmpgt_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_cmpgt_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cmpgt_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %cmp = icmp sgt <16 x i16> %arg0, %arg1
  %res = sext <16 x i1> %cmp to <16 x i16>
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_cmpgt_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_cmpgt_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cmpgt_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
  %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
  %cmp = icmp sgt <8 x i32> %arg0, %arg1
  %res = sext <8 x i1> %cmp to <8 x i32>
  %bc = bitcast <8 x i32> %res to <4 x i64>
  ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_cmpgt_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_cmpgt_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cmpgt_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %cmp = icmp sgt <4 x i64> %a0, %a1
  %res = sext <4 x i1> %cmp to <4 x i64>
  ret <4 x i64> %res
}

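; Sign extensions. Sources narrower than the full 128-bit vector are first
; extracted with a shufflevector, then sign-extended; each test expects the
; matching vpmovsx* instruction.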
define <4 x i64> @test_mm256_cvtepi8_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepi8_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpmovsxbw %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepi8_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpmovsxbw %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %ext = sext <16 x i8> %arg0 to <16 x i16>
  %res = bitcast <16 x i16> %ext to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_cvtepi8_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepi8_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpmovsxbd %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepi8_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpmovsxbd %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %shuf = shufflevector <16 x i8> %arg0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %ext = sext <8 x i8> %shuf to <8 x i32>
  %res = bitcast <8 x i32> %ext to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_cvtepi8_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepi8_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpmovsxbq %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepi8_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpmovsxbq %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %shuf = shufflevector <16 x i8> %arg0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %ext = sext <4 x i8> %shuf to <4 x i64>
  ret <4 x i64> %ext
}

define <4 x i64> @test_mm256_cvtepi16_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepi16_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpmovsxwd %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepi16_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpmovsxwd %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %ext = sext <8 x i16> %arg0 to <8 x i32>
  %res = bitcast <8 x i32> %ext to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_cvtepi16_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepi16_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpmovsxwq %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepi16_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpmovsxwq %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %shuf = shufflevector <8 x i16> %arg0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %ext = sext <4 x i16> %shuf to <4 x i64>
  ret <4 x i64> %ext
}

define <4 x i64> @test_mm256_cvtepi32_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepi32_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpmovsxdq %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepi32_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpmovsxdq %xmm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %ext = sext <4 x i32> %arg0 to <4 x i64>
  ret <4 x i64> %ext
}

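; Zero extensions. Same structure as the sign-extension tests above but with
; zext, selecting vpmovzx* (checked against their full shuffle decodings).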
define <4 x i64> @test_mm256_cvtepu8_epi16(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepu8_epi16:
; X32:       # BB#0:
; X32-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepu8_epi16:
; X64:       # BB#0:
; X64-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %ext = zext <16 x i8> %arg0 to <16 x i16>
  %res = bitcast <16 x i16> %ext to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_cvtepu8_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepu8_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepu8_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %shuf = shufflevector <16 x i8> %arg0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %ext = zext <8 x i8> %shuf to <8 x i32>
  %res = bitcast <8 x i32> %ext to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_cvtepu8_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepu8_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepu8_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %shuf = shufflevector <16 x i8> %arg0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %ext = zext <4 x i8> %shuf to <4 x i64>
  ret <4 x i64> %ext
}

define <4 x i64> @test_mm256_cvtepu16_epi32(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepu16_epi32:
; X32:       # BB#0:
; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepu16_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %ext = zext <8 x i16> %arg0 to <8 x i32>
  %res = bitcast <8 x i32> %ext to <4 x i64>
  ret <4 x i64> %res
}

define <4 x i64> @test_mm256_cvtepu16_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepu16_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepu16_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %shuf = shufflevector <8 x i16> %arg0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %ext = zext <4 x i16> %shuf to <4 x i64>
  ret <4 x i64> %ext
}

define <4 x i64> @test_mm256_cvtepu32_epi64(<2 x i64> %a0) {
; X32-LABEL: test_mm256_cvtepu32_epi64:
; X32:       # BB#0:
; X32-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_cvtepu32_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %ext = zext <4 x i32> %arg0 to <4 x i64>
  ret <4 x i64> %ext
}

define <2 x i64> @test_mm256_extracti128_si256(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_extracti128_si256:
; X32:       # BB#0:
; X32-NEXT:    vextractf128 $1, %ymm0, %xmm0
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_extracti128_si256:
; X64:       # BB#0:
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %res = shufflevector <4 x i64> %a0, <4 x i64> %a0, <2 x i32> <i32 2, i32 3>
  ret <2 x i64> %res
}

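; Horizontal adds/subs. These have no generic IR form and go through the
; llvm.x86.avx2.phadd*/phsub* intrinsics, selecting vphaddw/vphaddd/vphaddsw
; and vphsubw/vphsubd/vphsubsw.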
define <4 x i64> @test_mm256_hadd_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_hadd_epi16:
; X32:       # BB#0:
; X32-NEXT:    vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_hadd_epi16:
; X64:       # BB#0:
; X64-NEXT:    vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %res = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %arg0, <16 x i16> %arg1)
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readnone

define <4 x i64> @test_mm256_hadd_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_hadd_epi32:
; X32:       # BB#0:
; X32-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_hadd_epi32:
; X64:       # BB#0:
; X64-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
  %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
  %res = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %arg0, <8 x i32> %arg1)
  %bc = bitcast <8 x i32> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone

define <4 x i64> @test_mm256_hadds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_hadds_epi16:
; X32:       # BB#0:
; X32-NEXT:    vphaddsw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_hadds_epi16:
; X64:       # BB#0:
; X64-NEXT:    vphaddsw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %res = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %arg0, <16 x i16> %arg1)
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind readnone

define <4 x i64> @test_mm256_hsub_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_hsub_epi16:
; X32:       # BB#0:
; X32-NEXT:    vphsubw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_hsub_epi16:
; X64:       # BB#0:
; X64-NEXT:    vphsubw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %res = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %arg0, <16 x i16> %arg1)
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readnone

define <4 x i64> @test_mm256_hsub_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_hsub_epi32:
; X32:       # BB#0:
; X32-NEXT:    vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_hsub_epi32:
; X64:       # BB#0:
; X64-NEXT:    vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
  %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
  %res = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %arg0, <8 x i32> %arg1)
  %bc = bitcast <8 x i32> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone

define <4 x i64> @test_mm256_hsubs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_hsubs_epi16:
; X32:       # BB#0:
; X32-NEXT:    vphsubsw %ymm1, %ymm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_hsubs_epi16:
; X64:       # BB#0:
; X64-NEXT:    vphsubsw %ymm1, %ymm0, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
  %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
  %res = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %arg0, <16 x i16> %arg1)
  %bc = bitcast <16 x i16> %res to <4 x i64>
  ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind readnone

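; Gathers with 32-bit indices. The unmasked forms materialize an all-ones mask
; (vpcmpeqd, or a vcmpeqpd/vcmpeqps of zero against itself) and a zeroed
; destination before the gather; the *mask* forms pass source, index and mask
; straight through to the llvm.x86.avx2.gather.d.* intrinsics.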
define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_epi32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpgatherdd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT:    vmovdqa %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_i32gather_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT:    vmovdqa %xmm1, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast i32 *%a0 to i8*
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %mask = bitcast <2 x i64> <i64 -1, i64 -1> to <4 x i32>
  %call = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> undef, i8* %arg0, <4 x i32> %arg1, <4 x i32> %mask, i8 2)
  %bc = bitcast <4 x i32> %call to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>, i8) nounwind readonly

define <2 x i64> @test_mm_mask_i32gather_epi32(<2 x i64> %a0, i32 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X32-LABEL: test_mm_mask_i32gather_epi32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpgatherdd %xmm2, (%eax,%xmm1,2), %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_mask_i32gather_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast i32 *%a1 to i8*
  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
  %arg3 = bitcast <2 x i64> %a3 to <4 x i32>
  %call = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %arg0, i8* %arg1, <4 x i32> %arg2, <4 x i32> %arg3, i8 2)
  %bc = bitcast <4 x i32> %call to <2 x i64>
  ret <2 x i64> %bc
}

define <4 x i64> @test_mm256_i32gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_i32gather_epi32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpgatherdd %ymm2, (%eax,%ymm0,2), %ymm1
; X32-NEXT:    vmovdqa %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_i32gather_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpgatherdd %ymm2, (%rdi,%ymm0,2), %ymm1
; X64-NEXT:    vmovdqa %ymm1, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast i32 *%a0 to i8*
  %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
  %mask = bitcast <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1> to <8 x i32>
  %call = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> undef, i8* %arg0, <8 x i32> %arg1, <8 x i32> %mask, i8 2)
  %bc = bitcast <8 x i32> %call to <4 x i64>
  ret <4 x i64> %bc
}
declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*, <8 x i32>, <8 x i32>, i8) nounwind readonly

define <4 x i64> @test_mm256_mask_i32gather_epi32(<4 x i64> %a0, i32 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
; X32-LABEL: test_mm256_mask_i32gather_epi32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpgatherdd %ymm2, (%eax,%ymm1,2), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_i32gather_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast <4 x i64> %a0 to <8 x i32>
  %arg1 = bitcast i32 *%a1 to i8*
  %arg2 = bitcast <4 x i64> %a2 to <8 x i32>
  %arg3 = bitcast <4 x i64> %a3 to <8 x i32>
  %call = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %arg0, i8* %arg1, <8 x i32> %arg2, <8 x i32> %arg3, i8 2)
  %bc = bitcast <8 x i32> %call to <4 x i64>
  ret <4 x i64> %bc
}

define <2 x i64> @test_mm_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_epi64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpgatherdq %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT:    vmovdqa %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_i32gather_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpgatherdq %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT:    vmovdqa %xmm1, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast i64 *%a0 to i8*
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> undef, i8* %arg0, <4 x i32> %arg1, <2 x i64> <i64 -1, i64 -1>, i8 2)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*, <4 x i32>, <2 x i64>, i8) nounwind readonly

define <2 x i64> @test_mm_mask_i32gather_epi64(<2 x i64> %a0, i64 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X32-LABEL: test_mm_mask_i32gather_epi64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpgatherdq %xmm2, (%eax,%xmm1,2), %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_mask_i32gather_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT:    retq
  %arg1 = bitcast i64 *%a1 to i8*
  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
  %res = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0, i8* %arg1, <4 x i32> %arg2, <2 x i64> %a3, i8 2)
  ret <2 x i64> %res
}

define <4 x i64> @test_mm256_i32gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_i32gather_epi64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpgatherdq %ymm2, (%eax,%xmm0,2), %ymm1
; X32-NEXT:    vmovdqa %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_i32gather_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpgatherdq %ymm2, (%rdi,%xmm0,2), %ymm1
; X64-NEXT:    vmovdqa %ymm1, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast i64 *%a0 to i8*
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> undef, i8* %arg0, <4 x i32> %arg1, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i8 2)
  ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*, <4 x i32>, <4 x i64>, i8) nounwind readonly

define <4 x i64> @test_mm256_mask_i32gather_epi64(<4 x i64> %a0, i64 *%a1, <2 x i64> %a2, <4 x i64> %a3) {
; X32-LABEL: test_mm256_mask_i32gather_epi64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpgatherdq %ymm2, (%eax,%xmm1,2), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_i32gather_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0
; X64-NEXT:    retq
  %arg1 = bitcast i64 *%a1 to i8*
  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
  %res = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0, i8* %arg1, <4 x i32> %arg2, <4 x i64> %a3, i8 2)
  ret <4 x i64> %res
}

define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_pd:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; X32-NEXT:    vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT:    vmovapd %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_i32gather_pd:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT:    vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT:    vmovapd %xmm1, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast double *%a0 to i8*
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %cmp = fcmp oeq <2 x double> zeroinitializer, zeroinitializer
  %sext = sext <2 x i1> %cmp to <2 x i64>
  %mask = bitcast <2 x i64> %sext to <2 x double>
  %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> undef, i8* %arg0, <4 x i32> %arg1, <2 x double> %mask, i8 2)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*, <4 x i32>, <2 x double>, i8) nounwind readonly

define <2 x double> @test_mm_mask_i32gather_pd(<2 x double> %a0, double *%a1, <2 x i64> %a2, <2 x double> %a3) {
; X32-LABEL: test_mm_mask_i32gather_pd:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vgatherdpd %xmm2, (%eax,%xmm1,2), %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_mask_i32gather_pd:
; X64:       # BB#0:
; X64-NEXT:    vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT:    retq
  %arg1 = bitcast double *%a1 to i8*
  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
  %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0, i8* %arg1, <4 x i32> %arg2, <2 x double> %a3, i8 2)
  ret <2 x double> %res
}

define <4 x double> @test_mm256_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_i32gather_pd:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; X32-NEXT:    vcmpeqpd %ymm1, %ymm1, %ymm2
; X32-NEXT:    vgatherdpd %ymm2, (%eax,%xmm0,2), %ymm1
; X32-NEXT:    vmovapd %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_i32gather_pd:
; X64:       # BB#0:
; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT:    vcmpeqpd %ymm1, %ymm1, %ymm2
; X64-NEXT:    vgatherdpd %ymm2, (%rdi,%xmm0,2), %ymm1
; X64-NEXT:    vmovapd %ymm1, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast double *%a0 to i8*
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %mask = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> zeroinitializer, <4 x double> zeroinitializer, i8 0)
  %res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef, i8* %arg0, <4 x i32> %arg1, <4 x double> %mask, i8 2)
  ret <4 x double> %res
}
declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*, <4 x i32>, <4 x double>, i8) nounwind readonly

define <4 x double> @test_mm256_mask_i32gather_pd(<4 x double> %a0, double *%a1, <2 x i64> %a2, <4 x double> %a3) {
; X32-LABEL: test_mm256_mask_i32gather_pd:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vgatherdpd %ymm2, (%eax,%xmm1,2), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_i32gather_pd:
; X64:       # BB#0:
; X64-NEXT:    vgatherdpd %ymm2, (%rdi,%xmm1,2), %ymm0
; X64-NEXT:    retq
  %arg1 = bitcast double *%a1 to i8*
  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
  %res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0, i8* %arg1, <4 x i32> %arg2, <4 x double> %a3, i8 2)
  ret <4 x double> %res
}

define <4 x float> @test_mm_i32gather_ps(float *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_ps:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT:    vgatherdps %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT:    vmovaps %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_i32gather_ps:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT:    vgatherdps %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT:    vmovaps %xmm1, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast float *%a0 to i8*
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %cmp = fcmp oeq <4 x float> zeroinitializer, zeroinitializer
  %sext = sext <4 x i1> %cmp to <4 x i32>
  %mask = bitcast <4 x i32> %sext to <4 x float>
  %call = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> undef, i8* %arg0, <4 x i32> %arg1, <4 x float> %mask, i8 2)
  ret <4 x float> %call
}
declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x float>, i8) nounwind readonly

define <4 x float> @test_mm_mask_i32gather_ps(<4 x float> %a0, float *%a1, <2 x i64> %a2, <4 x float> %a3) {
; X32-LABEL: test_mm_mask_i32gather_ps:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vgatherdps %xmm2, (%eax,%xmm1,2), %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_mask_i32gather_ps:
; X64:       # BB#0:
; X64-NEXT:    vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT:    retq
  %arg1 = bitcast float *%a1 to i8*
  %arg2 = bitcast <2 x i64> %a2 to <4 x i32>
  %call = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0, i8* %arg1, <4 x i32> %arg2, <4 x float> %a3, i8 2)
  ret <4 x float> %call
}

define <8 x float> @test_mm256_i32gather_ps(float *%a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_i32gather_ps:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT:    vcmpeqps %ymm1, %ymm1, %ymm2
; X32-NEXT:    vgatherdps %ymm2, (%eax,%ymm0,2), %ymm1
; X32-NEXT:    vmovaps %ymm1, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_i32gather_ps:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT:    vcmpeqps %ymm1, %ymm1, %ymm2
; X64-NEXT:    vgatherdps %ymm2, (%rdi,%ymm0,2), %ymm1
; X64-NEXT:    vmovaps %ymm1, %ymm0
; X64-NEXT:    retq
  %arg0 = bitcast float *%a0 to i8*
  %arg1 = bitcast <4 x i64> %a1 to <8 x i32>
  %mask = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> zeroinitializer, <8 x float> zeroinitializer, i8 0)
  %call = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arg0, <8 x i32> %arg1, <8 x float> %mask, i8 2)
  ret <8 x float> %call
}
declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>, <8 x float>, i8) nounwind readonly

define <8 x float> @test_mm256_mask_i32gather_ps(<8 x float> %a0, float *%a1, <4 x i64> %a2, <8 x float> %a3) {
; X32-LABEL: test_mm256_mask_i32gather_ps:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vgatherdps %ymm2, (%eax,%ymm1,2), %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_i32gather_ps:
; X64:       # BB#0:
; X64-NEXT:    vgatherdps %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT:    retq
  %arg1 = bitcast float *%a1 to i8*
  %arg2 = bitcast <4 x i64> %a2 to <8 x i32>
  %call = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0, i8* %arg1, <8 x i32> %arg2, <8 x float> %a3, i8 2)
  ret <8 x float> %call
}

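; Gathers with 64-bit indices (llvm.x86.avx2.gather.q.*). Note that the
; 256-bit-index forms returning 32-bit elements only produce an xmm result,
; so those tests end with vzeroupper.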
define <2 x i64> @test_mm_i64gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i64gather_epi32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpgatherqd %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT:    vmovdqa %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_i64gather_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpgatherqd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT:    vmovdqa %xmm1, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast i32 *%a0 to i8*
  %mask = bitcast <2 x i64> <i64 -1, i64 -1> to <4 x i32>
  %call = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> undef, i8* %arg0, <2 x i64> %a1, <4 x i32> %mask, i8 2)
  %bc = bitcast <4 x i32> %call to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*, <2 x i64>, <4 x i32>, i8) nounwind readonly

define <2 x i64> @test_mm_mask_i64gather_epi32(<2 x i64> %a0, i32 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X32-LABEL: test_mm_mask_i64gather_epi32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpgatherqd %xmm2, (%eax,%xmm1,2), %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_mask_i64gather_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast i32 *%a1 to i8*
  %arg3 = bitcast <2 x i64> %a3 to <4 x i32>
  %call = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %arg0, i8* %arg1, <2 x i64> %a2, <4 x i32> %arg3, i8 2)
  %bc = bitcast <4 x i32> %call to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm256_i64gather_epi32(i32 *%a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_i64gather_epi32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpgatherqd %xmm2, (%eax,%ymm0,2), %xmm1
; X32-NEXT:    vmovdqa %xmm1, %xmm0
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_i64gather_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpgatherqd %xmm2, (%rdi,%ymm0,2), %xmm1
; X64-NEXT:    vmovdqa %xmm1, %xmm0
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %arg0 = bitcast i32 *%a0 to i8*
  %mask = bitcast <2 x i64> <i64 -1, i64 -1> to <4 x i32>
  %call = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> undef, i8* %arg0, <4 x i64> %a1, <4 x i32> %mask, i8 2)
  %bc = bitcast <4 x i32> %call to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*, <4 x i64>, <4 x i32>, i8) nounwind readonly

define <2 x i64> @test_mm256_mask_i64gather_epi32(<2 x i64> %a0, i32 *%a1, <4 x i64> %a2, <2 x i64> %a3) {
; X32-LABEL: test_mm256_mask_i64gather_epi32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpgatherqd %xmm2, (%eax,%ymm1,2), %xmm0
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: test_mm256_mask_i64gather_epi32:
; X64:       # BB#0:
; X64-NEXT:    vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast i32 *%a1 to i8*
  %arg3 = bitcast <2 x i64> %a3 to <4 x i32>
  %call = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %arg0, i8* %arg1, <4 x i64> %a2, <4 x i32> %arg3, i8 2)
  %bc = bitcast <4 x i32> %call to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_i64gather_epi64(i64 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i64gather_epi64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpgatherqq %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT:    vmovdqa %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_i64gather_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpgatherqq %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT:    vmovdqa %xmm1, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast i64 *%a0 to i8*
  %call = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> undef, i8* %arg0, <2 x i64> %a1, <2 x i64> <i64 -1, i64 -1>, i8 2)
  ret <2 x i64> %call
}
declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*, <2 x i64>, <2 x i64>, i8) nounwind readonly

define <2 x i64> @test_mm_mask_i64gather_epi64(<2 x i64> %a0, i64 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; X32-LABEL: test_mm_mask_i64gather_epi64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vpgatherqq %xmm2, (%eax,%xmm1,2), %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_mm_mask_i64gather_epi64:
; X64:       # BB#0:
; X64-NEXT:    vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT:    retq
|
|
%arg1 = bitcast i64 *%a1 to i8*
|
|
%call = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0, i8* %arg1, <2 x i64> %a2, <2 x i64> %a3, i8 2)
|
|
ret <2 x i64> %call
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_i64gather_epi64(i64 *%a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_i64gather_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
|
|
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
|
|
; X32-NEXT: vpgatherqq %ymm2, (%eax,%ymm0,2), %ymm1
|
|
; X32-NEXT: vmovdqa %ymm1, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_i64gather_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
|
|
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
|
|
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm0,2), %ymm1
|
|
; X64-NEXT: vmovdqa %ymm1, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast i64 *%a0 to i8*
|
|
%call = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> undef, i8* %arg0, <4 x i64> %a1, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i8 2)
|
|
ret <4 x i64> %call
|
|
}
|
|
declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*, <4 x i64>, <4 x i64>, i8) nounwind readonly
|
|
|
|
define <4 x i64> @test_mm256_mask_i64gather_epi64(<4 x i64> %a0, i64 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
|
|
; X32-LABEL: test_mm256_mask_i64gather_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: vpgatherqq %ymm2, (%eax,%ymm1,2), %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mask_i64gather_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0
|
|
; X64-NEXT: retq
|
|
%arg1 = bitcast i64 *%a1 to i8*
|
|
%call = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0, i8* %arg1, <4 x i64> %a2, <4 x i64> %a3, i8 2)
|
|
ret <4 x i64> %call
|
|
}
|
|
|
|
define <2 x double> @test_mm_i64gather_pd(double *%a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm_i64gather_pd:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
|
|
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
|
|
; X32-NEXT: vgatherqpd %xmm2, (%eax,%xmm0,2), %xmm1
|
|
; X32-NEXT: vmovapd %xmm1, %xmm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm_i64gather_pd:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
|
|
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
|
|
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm0,2), %xmm1
|
|
; X64-NEXT: vmovapd %xmm1, %xmm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast double *%a0 to i8*
|
|
%cmp = fcmp oeq <2 x double> zeroinitializer, zeroinitializer
|
|
%sext = sext <2 x i1> %cmp to <2 x i64>
|
|
%mask = bitcast <2 x i64> %sext to <2 x double>
|
|
%call = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> undef, i8* %arg0, <2 x i64> %a1, <2 x double> %mask, i8 2)
|
|
ret <2 x double> %call
|
|
}
|
|
declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*, <2 x i64>, <2 x double>, i8) nounwind readonly
|
|
|
|
define <2 x double> @test_mm_mask_i64gather_pd(<2 x double> %a0, double *%a1, <2 x i64> %a2, <2 x double> %a3) {
|
|
; X32-LABEL: test_mm_mask_i64gather_pd:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: vgatherqpd %xmm2, (%eax,%xmm1,2), %xmm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm_mask_i64gather_pd:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0
|
|
; X64-NEXT: retq
|
|
%arg1 = bitcast double *%a1 to i8*
|
|
%call = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0, i8* %arg1, <2 x i64> %a2, <2 x double> %a3, i8 2)
|
|
ret <2 x double> %call
|
|
}
|
|
|
|
define <4 x double> @test_mm256_i64gather_pd(double *%a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_i64gather_pd:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
|
|
; X32-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
|
|
; X32-NEXT: vgatherqpd %ymm2, (%eax,%ymm0,2), %ymm1
|
|
; X32-NEXT: vmovapd %ymm1, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_i64gather_pd:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
|
|
; X64-NEXT: vcmpeqpd %ymm1, %ymm1, %ymm2
|
|
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm0,2), %ymm1
|
|
; X64-NEXT: vmovapd %ymm1, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast double *%a0 to i8*
|
|
%mask = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> zeroinitializer, <4 x double> zeroinitializer, i8 0)
|
|
%call = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> undef, i8* %arg0, <4 x i64> %a1, <4 x double> %mask, i8 2)
|
|
ret <4 x double> %call
|
|
}
|
|
declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*, <4 x i64>, <4 x double>, i8) nounwind readonly

define <4 x double> @test_mm256_mask_i64gather_pd(<4 x double> %a0, double *%a1, <4 x i64> %a2, <4 x double> %a3) {
; X32-LABEL: test_mm256_mask_i64gather_pd:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vgatherqpd %ymm2, (%eax,%ymm1,2), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_i64gather_pd:
; X64: # BB#0:
; X64-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,2), %ymm0
; X64-NEXT: retq
%arg1 = bitcast double *%a1 to i8*
%call = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0, i8* %arg1, <4 x i64> %a2, <4 x double> %a3, i8 2)
ret <4 x double> %call
}

define <4 x float> @test_mm_i64gather_ps(float *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i64gather_ps:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vgatherqps %xmm2, (%eax,%xmm0,2), %xmm1
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i64gather_ps:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
%arg0 = bitcast float *%a0 to i8*
%cmp = fcmp oeq <4 x float> zeroinitializer, zeroinitializer
%sext = sext <4 x i1> %cmp to <4 x i32>
%mask = bitcast <4 x i32> %sext to <4 x float>
%call = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> undef, i8* %arg0, <2 x i64> %a1, <4 x float> %mask, i8 2)
ret <4 x float> %call
}
declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*, <2 x i64>, <4 x float>, i8) nounwind readonly

define <4 x float> @test_mm_mask_i64gather_ps(<4 x float> %a0, float *%a1, <2 x i64> %a2, <4 x float> %a3) {
; X32-LABEL: test_mm_mask_i64gather_ps:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vgatherqps %xmm2, (%eax,%xmm1,2), %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_mask_i64gather_ps:
; X64: # BB#0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0
; X64-NEXT: retq
%arg1 = bitcast float *%a1 to i8*
%call = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0, i8* %arg1, <2 x i64> %a2, <4 x float> %a3, i8 2)
ret <4 x float> %call
}

define <4 x float> @test_mm256_i64gather_ps(float *%a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_i64gather_ps:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vgatherqps %xmm2, (%eax,%ymm0,2), %xmm1
; X32-NEXT: vmovaps %xmm1, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_i64gather_ps:
; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm0,2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%arg0 = bitcast float *%a0 to i8*
%cmp = fcmp oeq <4 x float> zeroinitializer, zeroinitializer
%sext = sext <4 x i1> %cmp to <4 x i32>
%mask = bitcast <4 x i32> %sext to <4 x float>
%call = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> undef, i8* %arg0, <4 x i64> %a1, <4 x float> %mask, i8 2)
ret <4 x float> %call
}
declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*, <4 x i64>, <4 x float>, i8) nounwind readonly

define <4 x float> @test_mm256_mask_i64gather_ps(<4 x float> %a0, float *%a1, <4 x i64> %a2, <4 x float> %a3) {
; X32-LABEL: test_mm256_mask_i64gather_ps:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vgatherqps %xmm2, (%eax,%ymm1,2), %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_mask_i64gather_ps:
; X64: # BB#0:
; X64-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,2), %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%arg1 = bitcast float *%a1 to i8*
%call = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %a0, i8* %arg1, <4 x i64> %a2, <4 x float> %a3, i8 2)
ret <4 x float> %call
}

define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test0_mm256_inserti128_si256:
; X32: # BB#0:
; X32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
; X32-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test0_mm256_inserti128_si256:
; X64: # BB#0:
; X64-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
; X64-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X64-NEXT: retq
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%res = shufflevector <4 x i64> %a0, <4 x i64> %ext, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
ret <4 x i64> %res
}

define <4 x i64> @test1_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test1_mm256_inserti128_si256:
; X32: # BB#0:
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test1_mm256_inserti128_si256:
; X64: # BB#0:
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%res = shufflevector <4 x i64> %a0, <4 x i64> %ext, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
ret <4 x i64> %res
}

define <4 x i64> @test_mm256_madd_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_madd_epi16:
; X32: # BB#0:
; X32-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_madd_epi16:
; X64: # BB#0:
; X64-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
%res = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %arg0, <16 x i16> %arg1)
%bc = bitcast <8 x i32> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readnone

define <4 x i64> @test_mm256_maddubs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_maddubs_epi16:
; X32: # BB#0:
; X32-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maddubs_epi16:
; X64: # BB#0:
; X64-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
%res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %arg0, <32 x i8> %arg1)
%bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone

define <2 x i64> @test_mm_maskload_epi32(i32* %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_maskload_epi32:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmaskmovd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskload_epi32:
; X64: # BB#0:
; X64-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast i32* %a0 to i8*
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
%call = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %arg0, <4 x i32> %arg1)
%bc = bitcast <4 x i32> %call to <2 x i64>
ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>) nounwind readonly

define <4 x i64> @test_mm256_maskload_epi32(i32* %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_maskload_epi32:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmaskmovd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskload_epi32:
; X64: # BB#0:
; X64-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast i32* %a0 to i8*
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
%call = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %arg0, <8 x i32> %arg1)
%bc = bitcast <8 x i32> %call to <4 x i64>
ret <4 x i64> %bc
}
declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>) nounwind readonly

define <2 x i64> @test_mm_maskload_epi64(i64* %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm_maskload_epi64:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmaskmovq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskload_epi64:
; X64: # BB#0:
; X64-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast i64* %a0 to i8*
%res = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %arg0, <2 x i64> %a1)
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>) nounwind readonly

define <4 x i64> @test_mm256_maskload_epi64(i64* %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_maskload_epi64:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmaskmovq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskload_epi64:
; X64: # BB#0:
; X64-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast i64* %a0 to i8*
%res = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %arg0, <4 x i64> %a1)
ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>) nounwind readonly

define void @test_mm_maskstore_epi32(i32* %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
; X32-LABEL: test_mm_maskstore_epi32:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmaskmovd %xmm1, %xmm0, (%eax)
; X32-NEXT: retl
;
; X64-LABEL: test_mm_maskstore_epi32:
; X64: # BB#0:
; X64-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; X64-NEXT: retq
%arg0 = bitcast i32* %a0 to i8*
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
%arg2 = bitcast <2 x i64> %a2 to <4 x i32>
call void @llvm.x86.avx2.maskstore.d(i8* %arg0, <4 x i32> %arg1, <4 x i32> %arg2)
ret void
}
declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind readnone

define void @test_mm256_maskstore_epi32(i32* %a0, <4 x i64> %a1, <4 x i64> %a2) nounwind {
; X32-LABEL: test_mm256_maskstore_epi32:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmaskmovd %ymm1, %ymm0, (%eax)
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_maskstore_epi32:
; X64: # BB#0:
; X64-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%arg0 = bitcast i32* %a0 to i8*
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
%arg2 = bitcast <4 x i64> %a2 to <8 x i32>
call void @llvm.x86.avx2.maskstore.d.256(i8* %arg0, <8 x i32> %arg1, <8 x i32> %arg2)
ret void
}
declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>) nounwind readnone
|
|
|
|
define void @test_mm_maskstore_epi64(i64* %a0, <2 x i64> %a1, <2 x i64> %a2) nounwind {
|
|
; X32-LABEL: test_mm_maskstore_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: vpmaskmovq %xmm1, %xmm0, (%eax)
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm_maskstore_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi)
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast i64* %a0 to i8*
|
|
call void @llvm.x86.avx2.maskstore.q(i8* %arg0, <2 x i64> %a1, <2 x i64> %a2)
|
|
ret void
|
|
}
|
|
declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind readnone
|
|
|
|
define void @test_mm256_maskstore_epi64(i64* %a0, <4 x i64> %a1, <4 x i64> %a2) nounwind {
|
|
; X32-LABEL: test_mm256_maskstore_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: vpmaskmovq %ymm1, %ymm0, (%eax)
|
|
; X32-NEXT: vzeroupper
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_maskstore_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi)
|
|
; X64-NEXT: vzeroupper
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast i64* %a0 to i8*
|
|
call void @llvm.x86.avx2.maskstore.q.256(i8* %arg0, <4 x i64> %a1, <4 x i64> %a2)
|
|
ret void
|
|
}
|
|
declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_max_epi8(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_max_epi8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_max_epi8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
|
|
%cmp = icmp sgt <32 x i8> %arg0, %arg1
|
|
%sel = select <32 x i1> %cmp, <32 x i8> %arg0, <32 x i8> %arg1
|
|
%bc = bitcast <32 x i8> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_max_epi16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_max_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_max_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%cmp = icmp sgt <16 x i16> %arg0, %arg1
|
|
%sel = select <16 x i1> %cmp, <16 x i16> %arg0, <16 x i16> %arg1
|
|
%bc = bitcast <16 x i16> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_max_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_max_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_max_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%cmp = icmp sgt <8 x i32> %arg0, %arg1
|
|
%sel = select <8 x i1> %cmp, <8 x i32> %arg0, <8 x i32> %arg1
|
|
%bc = bitcast <8 x i32> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_max_epu8(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_max_epu8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_max_epu8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
|
|
%cmp = icmp ugt <32 x i8> %arg0, %arg1
|
|
%sel = select <32 x i1> %cmp, <32 x i8> %arg0, <32 x i8> %arg1
|
|
%bc = bitcast <32 x i8> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_max_epu16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_max_epu16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_max_epu16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%cmp = icmp ugt <16 x i16> %arg0, %arg1
|
|
%sel = select <16 x i1> %cmp, <16 x i16> %arg0, <16 x i16> %arg1
|
|
%bc = bitcast <16 x i16> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_max_epu32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_max_epu32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_max_epu32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%cmp = icmp ugt <8 x i32> %arg0, %arg1
|
|
%sel = select <8 x i1> %cmp, <8 x i32> %arg0, <8 x i32> %arg1
|
|
%bc = bitcast <8 x i32> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_min_epi8(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_min_epi8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpminsb %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_min_epi8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpminsb %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
|
|
%cmp = icmp slt <32 x i8> %arg0, %arg1
|
|
%sel = select <32 x i1> %cmp, <32 x i8> %arg0, <32 x i8> %arg1
|
|
%bc = bitcast <32 x i8> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_min_epi16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_min_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpminsw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_min_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpminsw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%cmp = icmp slt <16 x i16> %arg0, %arg1
|
|
%sel = select <16 x i1> %cmp, <16 x i16> %arg0, <16 x i16> %arg1
|
|
%bc = bitcast <16 x i16> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_min_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_min_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpminsd %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_min_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpminsd %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%cmp = icmp slt <8 x i32> %arg0, %arg1
|
|
%sel = select <8 x i1> %cmp, <8 x i32> %arg0, <8 x i32> %arg1
|
|
%bc = bitcast <8 x i32> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_min_epu8(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_min_epu8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpminub %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_min_epu8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpminub %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
|
|
%cmp = icmp ult <32 x i8> %arg0, %arg1
|
|
%sel = select <32 x i1> %cmp, <32 x i8> %arg0, <32 x i8> %arg1
|
|
%bc = bitcast <32 x i8> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_min_epu16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_min_epu16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpminuw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_min_epu16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpminuw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%cmp = icmp ult <16 x i16> %arg0, %arg1
|
|
%sel = select <16 x i1> %cmp, <16 x i16> %arg0, <16 x i16> %arg1
|
|
%bc = bitcast <16 x i16> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_min_epu32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_min_epu32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_min_epu32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpminud %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%cmp = icmp ult <8 x i32> %arg0, %arg1
|
|
%sel = select <8 x i1> %cmp, <8 x i32> %arg0, <8 x i32> %arg1
|
|
%bc = bitcast <8 x i32> %sel to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define i32 @test_mm256_movemask_epi8(<4 x i64> %a0) nounwind {
|
|
; X32-LABEL: test_mm256_movemask_epi8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmovmskb %ymm0, %eax
|
|
; X32-NEXT: vzeroupper
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_movemask_epi8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmovmskb %ymm0, %eax
|
|
; X64-NEXT: vzeroupper
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%res = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %arg0)
|
|
ret i32 %res
|
|
}
|
|
declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_mpsadbw_epu8(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_mpsadbw_epu8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vmpsadbw $3, %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mpsadbw_epu8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vmpsadbw $3, %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
|
|
%call = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %arg0, <32 x i8> %arg1, i8 3)
|
|
%bc = bitcast <16 x i16> %call to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_mul_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_mul_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mul_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%res = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %arg0, <8 x i32> %arg1)
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_mul_epu32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_mul_epu32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mul_epu32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%res = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %arg0, <8 x i32> %arg1)
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_mulhi_epi16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_mulhi_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmulhw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mulhi_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmulhw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%res = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %arg0, <16 x i16> %arg1)
|
|
%bc = bitcast <16 x i16> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_mulhi_epu16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_mulhi_epu16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mulhi_epu16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%res = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %arg0, <16 x i16> %arg1)
|
|
%bc = bitcast <16 x i16> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_mulhrs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_mulhrs_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mulhrs_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%res = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %arg0, <16 x i16> %arg1)
|
|
%bc = bitcast <16 x i16> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_mullo_epi16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_mullo_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mullo_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%res = mul <16 x i16> %arg0, %arg1
|
|
%bc = bitcast <16 x i16> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_mullo_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_mullo_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_mullo_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%res = mul <8 x i32> %arg0, %arg1
|
|
%bc = bitcast <8 x i32> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_or_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
|
|
; X32-LABEL: test_mm256_or_si256:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vorps %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_or_si256:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vorps %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%res = or <4 x i64> %a0, %a1
|
|
ret <4 x i64> %res
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_packs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_packs_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_packs_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%call = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %arg0, <16 x i16> %arg1)
|
|
%res = bitcast <32 x i8> %call to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_packs_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_packs_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_packs_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%call = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %arg0, <8 x i32> %arg1)
|
|
%res = bitcast <16 x i16> %call to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_packus_epi16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_packus_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_packus_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%call = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %arg0, <16 x i16> %arg1)
|
|
%res = bitcast <32 x i8> %call to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_packus_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_packus_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_packus_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%call = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %arg0, <8 x i32> %arg1)
|
|
%res = bitcast <16 x i16> %call to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_permute2x128_si256(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_permute2x128_si256:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_permute2x128_si256:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
|
|
; X64-NEXT: retq
|
|
%res = call <4 x i64> @llvm.x86.avx2.vperm2i128(<4 x i64> %a0, <4 x i64> %a1, i8 49)
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <4 x i64> @llvm.x86.avx2.vperm2i128(<4 x i64>, <4 x i64>, i8) nounwind readonly
|
|
|
|
define <4 x i64> @test_mm256_permute4x64_epi64(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_permute4x64_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,0,2,0]
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_permute4x64_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,0,2,0]
|
|
; X64-NEXT: retq
|
|
%res = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 0, i32 2, i32 0>
|
|
ret <4 x i64> %res
|
|
}
|
|
|
|
define <4 x double> @test_mm256_permute4x64_pd(<4 x double> %a0) {
|
|
; X32-LABEL: test_mm256_permute4x64_pd:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,1,0]
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_permute4x64_pd:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,2,1,0]
|
|
; X64-NEXT: retq
|
|
%res = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 2, i32 1, i32 0>
|
|
ret <4 x double> %res
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_permutevar8x32_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_permutevar8x32_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpermd %ymm0, %ymm1, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_permutevar8x32_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpermd %ymm0, %ymm1, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%call = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %arg0, <8 x i32> %arg1)
|
|
%res = bitcast <8 x i32> %call to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
|
|
|
|
define <8 x float> @test_mm256_permutevar8x32_ps(<8 x float> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_permutevar8x32_ps:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_permutevar8x32_ps:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%res = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %arg1)
|
|
ret <8 x float> %res
|
|
}
|
|
declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind readonly
|
|
|
|
define <4 x i64> @test_mm256_sad_epu8(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sad_epu8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sad_epu8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
|
|
%res = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %arg0, <32 x i8> %arg1)
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_shuffle_epi32(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_shuffle_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,3,0,0,7,7,4,4]
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_shuffle_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,3,0,0,7,7,4,4]
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%shuf = shufflevector <8 x i32> %arg0, <8 x i32> undef, <8 x i32> <i32 3, i32 3, i32 0, i32 0, i32 7, i32 7, i32 4, i32 4>
|
|
%res = bitcast <8 x i32> %shuf to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_shuffle_epi8(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_shuffle_epi8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpshufb %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_shuffle_epi8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpshufb %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
|
|
%shuf = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %arg0, <32 x i8> %arg1)
|
|
%res = bitcast <32 x i8> %shuf to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_shufflehi_epi16(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_shufflehi_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,6,5,8,9,10,11,15,14,14,13]
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_shufflehi_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,6,5,8,9,10,11,15,14,14,13]
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%shuf = shufflevector <16 x i16> %arg0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 6, i32 5, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 14, i32 13>
|
|
%res = bitcast <16 x i16> %shuf to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_shufflelo_epi16(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_shufflelo_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,0,1,1,4,5,6,7,11,8,9,9,12,13,14,15]
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_shufflelo_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,0,1,1,4,5,6,7,11,8,9,9,12,13,14,15]
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%shuf = shufflevector <16 x i16> %arg0, <16 x i16> undef, <16 x i32> <i32 3, i32 0, i32 1, i32 1, i32 4, i32 5, i32 6, i32 7, i32 11, i32 8, i32 9, i32 9, i32 12, i32 13, i32 14, i32 15>
|
|
%res = bitcast <16 x i16> %shuf to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
|
|
define <4 x i64> @test_mm256_sign_epi8(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sign_epi8:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsignb %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sign_epi8:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsignb %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
|
|
%call = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %arg0, <32 x i8> %arg1)
|
|
%res = bitcast <32 x i8> %call to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sign_epi16(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sign_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsignw %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sign_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsignw %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
|
|
%call = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %arg0, <16 x i16> %arg1)
|
|
%res = bitcast <16 x i16> %call to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sign_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sign_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsignd %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sign_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsignd %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%call = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %arg0, <8 x i32> %arg1)
|
|
%res = bitcast <8 x i32> %call to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sll_epi16(<4 x i64> %a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sll_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsllw %xmm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sll_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsllw %xmm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <2 x i64> %a1 to <8 x i16>
|
|
%res = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %arg0, <8 x i16> %arg1)
|
|
%bc = bitcast <16 x i16> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sll_epi32(<4 x i64> %a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sll_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpslld %xmm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sll_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpslld %xmm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
|
|
%res = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %arg0, <4 x i32> %arg1)
|
|
%bc = bitcast <8 x i32> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sll_epi64(<4 x i64> %a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sll_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsllq %xmm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sll_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsllq %xmm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1)
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_slli_epi16(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_slli_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_slli_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%res = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %arg0, i32 3)
|
|
%bc = bitcast <16 x i16> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_slli_epi32(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_slli_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpslld $3, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_slli_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpslld $3, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%res = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %arg0, i32 3)
|
|
%bc = bitcast <8 x i32> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_slli_epi64(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_slli_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsllq $3, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_slli_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsllq $3, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 3)
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_slli_si256(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_slli_si256:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28]
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_slli_si256:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28]
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
|
|
%shuf = shufflevector <32 x i8> zeroinitializer, <32 x i8> %arg0, <32 x i32> <i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60>
|
|
%res = bitcast <32 x i8> %shuf to <4 x i64>
|
|
ret <4 x i64> %res
|
|
}
|
|
|
|
define <2 x i64> @test_mm_sllv_epi32(<2 x i64> %a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm_sllv_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm_sllv_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
|
|
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
|
|
%res = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %arg0, <4 x i32> %arg1)
|
|
%bc = bitcast <4 x i32> %res to <2 x i64>
|
|
ret <2 x i64> %bc
|
|
}
|
|
declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sllv_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sllv_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sllv_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%res = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %arg0, <8 x i32> %arg1)
|
|
%bc = bitcast <8 x i32> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
|
|
|
|
define <2 x i64> @test_mm_sllv_epi64(<2 x i64> %a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm_sllv_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm_sllv_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
|
|
; X64-NEXT: retq
|
|
%res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1)
|
|
ret <2 x i64> %res
|
|
}
|
|
declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sllv_epi64(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sllv_epi64:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sllv_epi64:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1)
|
|
ret <4 x i64> %res
|
|
}
|
|
declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sra_epi16(<4 x i64> %a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sra_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsraw %xmm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sra_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsraw %xmm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%arg1 = bitcast <2 x i64> %a1 to <8 x i16>
|
|
%res = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %arg0, <8 x i16> %arg1)
|
|
%bc = bitcast <16 x i16> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_sra_epi32(<4 x i64> %a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_sra_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsrad %xmm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_sra_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsrad %xmm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
|
|
%res = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %arg0, <4 x i32> %arg1)
|
|
%bc = bitcast <8 x i32> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_srai_epi16(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_srai_epi16:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsraw $3, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_srai_epi16:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsraw $3, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
|
|
%res = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %arg0, i32 3)
|
|
%bc = bitcast <16 x i16> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_srai_epi32(<4 x i64> %a0) {
|
|
; X32-LABEL: test_mm256_srai_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsrad $3, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_srai_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsrad $3, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%res = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %arg0, i32 3)
|
|
%bc = bitcast <8 x i32> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32>, i32) nounwind readnone
|
|
|
|
define <2 x i64> @test_mm_srav_epi32(<2 x i64> %a0, <2 x i64> %a1) {
|
|
; X32-LABEL: test_mm_srav_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsravd %xmm1, %xmm0, %xmm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm_srav_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsravd %xmm1, %xmm0, %xmm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
|
|
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
|
|
%res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %arg0, <4 x i32> %arg1)
|
|
%bc = bitcast <4 x i32> %res to <2 x i64>
|
|
ret <2 x i64> %bc
|
|
}
|
|
declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_srav_epi32(<4 x i64> %a0, <4 x i64> %a1) {
|
|
; X32-LABEL: test_mm256_srav_epi32:
|
|
; X32: # BB#0:
|
|
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_mm256_srav_epi32:
|
|
; X64: # BB#0:
|
|
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
|
|
; X64-NEXT: retq
|
|
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
|
|
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
|
|
%res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %arg0, <8 x i32> %arg1)
|
|
%bc = bitcast <8 x i32> %res to <4 x i64>
|
|
ret <4 x i64> %bc
|
|
}
|
|
declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind readnone
|
|
|
|
define <4 x i64> @test_mm256_srl_epi16(<4 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_srl_epi16:
; X32: # BB#0:
; X32-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srl_epi16:
; X64: # BB#0:
; X64-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <2 x i64> %a1 to <8 x i16>
%res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %arg0, <8 x i16> %arg1)
%bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone

define <4 x i64> @test_mm256_srl_epi32(<4 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_srl_epi32:
; X32: # BB#0:
; X32-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srl_epi32:
; X64: # BB#0:
; X64-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
%res = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %arg0, <4 x i32> %arg1)
%bc = bitcast <8 x i32> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone

define <4 x i64> @test_mm256_srl_epi64(<4 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm256_srl_epi64:
; X32: # BB#0:
; X32-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srl_epi64:
; X64: # BB#0:
; X64-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1)
ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone

define <4 x i64> @test_mm256_srli_epi16(<4 x i64> %a0) {
; X32-LABEL: test_mm256_srli_epi16:
; X32: # BB#0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srli_epi16:
; X64: # BB#0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%res = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %arg0, i32 3)
%bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone

define <4 x i64> @test_mm256_srli_epi32(<4 x i64> %a0) {
; X32-LABEL: test_mm256_srli_epi32:
; X32: # BB#0:
; X32-NEXT: vpsrld $3, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srli_epi32:
; X64: # BB#0:
; X64-NEXT: vpsrld $3, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
%res = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %arg0, i32 3)
%bc = bitcast <8 x i32> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) nounwind readnone

define <4 x i64> @test_mm256_srli_epi64(<4 x i64> %a0) {
; X32-LABEL: test_mm256_srli_epi64:
; X32: # BB#0:
; X32-NEXT: vpsrlq $3, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srli_epi64:
; X64: # BB#0:
; X64-NEXT: vpsrlq $3, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 3)
ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) nounwind readnone

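; _mm256_srli_si256 is a byte shift within each 128-bit lane; it is expressed
; as a shufflevector that pulls the vacated bytes from a zero vector, matching
; the vpsrldq mask below.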
define <4 x i64> @test_mm256_srli_si256(<4 x i64> %a0) {
; X32-LABEL: test_mm256_srli_si256:
; X32: # BB#0:
; X32-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srli_si256:
; X64: # BB#0:
; X64-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%shuf = shufflevector <32 x i8> %arg0, <32 x i8> zeroinitializer, <32 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50>
%res = bitcast <32 x i8> %shuf to <4 x i64>
ret <4 x i64> %res
}

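; The srlv tests cover the per-element variable logical shifts
; (vpsrlvd/vpsrlvq) at both 128-bit and 256-bit widths.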
define <2 x i64> @test_mm_srlv_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_srlv_epi32:
; X32: # BB#0:
; X32-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srlv_epi32:
; X64: # BB#0:
; X64-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
%arg1 = bitcast <2 x i64> %a1 to <4 x i32>
%res = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %arg0, <4 x i32> %arg1)
%bc = bitcast <4 x i32> %res to <2 x i64>
ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone

define <4 x i64> @test_mm256_srlv_epi32(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_srlv_epi32:
; X32: # BB#0:
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srlv_epi32:
; X64: # BB#0:
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
%res = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %arg0, <8 x i32> %arg1)
%bc = bitcast <8 x i32> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind readnone

define <2 x i64> @test_mm_srlv_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_srlv_epi64:
; X32: # BB#0:
; X32-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm_srlv_epi64:
; X64: # BB#0:
; X64-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1)
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone

define <4 x i64> @test_mm256_srlv_epi64(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_srlv_epi64:
; X32: # BB#0:
; X32-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_srlv_epi64:
; X64: # BB#0:
; X64-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1)
ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind readnone

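; _mm256_stream_load_si256 performs a non-temporal aligned load (vmovntdqa);
; the pointer is bitcast to i8* for the llvm.x86.avx2.movntdqa intrinsic.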
define <4 x i64> @test_mm256_stream_load_si256(<4 x i64> *%a0) {
; X32-LABEL: test_mm256_stream_load_si256:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vmovntdqa (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_stream_load_si256:
; X64: # BB#0:
; X64-NEXT: vmovntdqa (%rdi), %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> *%a0 to i8*
%res = call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %arg0)
ret <4 x i64> %res
}
declare <4 x i64> @llvm.x86.avx2.movntdqa(i8*) nounwind readonly

define <4 x i64> @test_mm256_sub_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_sub_epi8:
; X32: # BB#0:
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_sub_epi8:
; X64: # BB#0:
; X64-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
%res = sub <32 x i8> %arg0, %arg1
%bc = bitcast <32 x i8> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_sub_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_sub_epi16:
; X32: # BB#0:
; X32-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_sub_epi16:
; X64: # BB#0:
; X64-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
%res = sub <16 x i16> %arg0, %arg1
%bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_sub_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_sub_epi32:
; X32: # BB#0:
; X32-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_sub_epi32:
; X64: # BB#0:
; X64-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
%res = sub <8 x i32> %arg0, %arg1
%bc = bitcast <8 x i32> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_sub_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_sub_epi64:
; X32: # BB#0:
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_sub_epi64:
; X64: # BB#0:
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = sub <4 x i64> %a0, %a1
ret <4 x i64> %res
}

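; The subs tests use saturating subtraction: vpsubsb/vpsubsw clamp to the
; signed range and vpsubusb/vpsubusw clamp to the unsigned range, rather than
; wrapping like the plain sub tests above.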
define <4 x i64> @test_mm256_subs_epi8(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_subs_epi8:
; X32: # BB#0:
; X32-NEXT: vpsubsb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_subs_epi8:
; X64: # BB#0:
; X64-NEXT: vpsubsb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
%res = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %arg0, <32 x i8> %arg1)
%bc = bitcast <32 x i8> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone

define <4 x i64> @test_mm256_subs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_subs_epi16:
; X32: # BB#0:
; X32-NEXT: vpsubsw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_subs_epi16:
; X64: # BB#0:
; X64-NEXT: vpsubsw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
%res = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %arg0, <16 x i16> %arg1)
%bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone

define <4 x i64> @test_mm256_subs_epu8(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_subs_epu8:
; X32: # BB#0:
; X32-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_subs_epu8:
; X64: # BB#0:
; X64-NEXT: vpsubusb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
%res = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %arg0, <32 x i8> %arg1)
%bc = bitcast <32 x i8> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone

define <4 x i64> @test_mm256_subs_epu16(<4 x i64> %a0, <4 x i64> %a1) {
; X32-LABEL: test_mm256_subs_epu16:
; X32: # BB#0:
; X32-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_subs_epu16:
; X64: # BB#0:
; X64-NEXT: vpsubusw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
%res = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %arg0, <16 x i16> %arg1)
%bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}
declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone

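; The unpack tests interleave elements from the high or low half of each
; 128-bit lane separately, so the shufflevector masks repeat per lane rather
; than spanning the full 256-bit vector.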
define <4 x i64> @test_mm256_unpackhi_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_unpackhi_epi8:
; X32: # BB#0:
; X32-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpackhi_epi8:
; X64: # BB#0:
; X64-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
%res = shufflevector <32 x i8> %arg0, <32 x i8> %arg1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
%bc = bitcast <32 x i8> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_unpackhi_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_unpackhi_epi16:
; X32: # BB#0:
; X32-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpackhi_epi16:
; X64: # BB#0:
; X64-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
%res = shufflevector <16 x i16> %arg0, <16 x i16> %arg1, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
%bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_unpackhi_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_unpackhi_epi32:
; X32: # BB#0:
; X32-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpackhi_epi32:
; X64: # BB#0:
; X64-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
%res = shufflevector <8 x i32> %arg0, <8 x i32> %arg1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
%bc = bitcast <8 x i32> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_unpackhi_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_unpackhi_epi64:
; X32: # BB#0:
; X32-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpackhi_epi64:
; X64: # BB#0:
; X64-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; X64-NEXT: retq
%res = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x i64> %res
}

define <4 x i64> @test_mm256_unpacklo_epi8(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_unpacklo_epi8:
; X32: # BB#0:
; X32-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpacklo_epi8:
; X64: # BB#0:
; X64-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <32 x i8>
%arg1 = bitcast <4 x i64> %a1 to <32 x i8>
%res = shufflevector <32 x i8> %arg0, <32 x i8> %arg1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
%bc = bitcast <32 x i8> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_unpacklo_epi16(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_unpacklo_epi16:
; X32: # BB#0:
; X32-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpacklo_epi16:
; X64: # BB#0:
; X64-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <16 x i16>
%arg1 = bitcast <4 x i64> %a1 to <16 x i16>
%res = shufflevector <16 x i16> %arg0, <16 x i16> %arg1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
%bc = bitcast <16 x i16> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_unpacklo_epi32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_unpacklo_epi32:
; X32: # BB#0:
; X32-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpacklo_epi32:
; X64: # BB#0:
; X64-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; X64-NEXT: retq
%arg0 = bitcast <4 x i64> %a0 to <8 x i32>
%arg1 = bitcast <4 x i64> %a1 to <8 x i32>
%res = shufflevector <8 x i32> %arg0, <8 x i32> %arg1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
%bc = bitcast <8 x i32> %res to <4 x i64>
ret <4 x i64> %bc
}

define <4 x i64> @test_mm256_unpacklo_epi64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_unpacklo_epi64:
; X32: # BB#0:
; X32-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_unpacklo_epi64:
; X64: # BB#0:
; X64-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; X64-NEXT: retq
%res = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
ret <4 x i64> %res
}

define <4 x i64> @test_mm256_xor_si256(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_xor_si256:
; X32: # BB#0:
; X32-NEXT: vxorps %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_xor_si256:
; X64: # BB#0:
; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = xor <4 x i64> %a0, %a1
ret <4 x i64> %res
}

declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone

declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone