forked from OSchip/llvm-project
[X86][SSE] Cleanup SSE4A/SSE41/SSE42 intrinsics tests
Ensure we cover 32/64-bit targets for the SSE/AVX/AVX512 cases as necessary. Added some missing encoding checks to the SSE4A tests.
llvm-svn: 333828
This commit is contained in:
parent
d93157c1b3
commit
dda8daec73
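For reference, the cleaned-up tests now run each intrinsic through both a 32-bit and a 64-bit triple for the SSE4.1, AVX and AVX512F configurations, using RUN lines along these lines (taken from the updated RUN lines in the diff below; the exact check-prefix groups vary slightly per file):

; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512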
@@ -1,14 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse4.1 | FileCheck %s
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 ; This test works just like the non-upgrade one except that it only checks
 ; forms which require auto-upgrading.
 
 define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse41_blendpd:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_blendpd:
+; SSE: ## %bb.0:
+; SSE-NEXT: blendps $12, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x0c]
+; SSE-NEXT: ## xmm0 = xmm0[0,1],xmm1[2,3]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse41_blendpd:
+; AVX1: ## %bb.0:
+; AVX1-NEXT: vblendps $3, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x03]
+; AVX1-NEXT: ## xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_blendpd:
+; AVX512: ## %bb.0:
+; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf3,0x10,0xc0]
+; AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[1]
+; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 6) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@@ -16,10 +34,17 @@ declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i32) no
 
 
 define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse41_blendps:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_blendps:
+; SSE: ## %bb.0:
+; SSE-NEXT: blendps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0c,0xc1,0x07]
+; SSE-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_blendps:
+; AVX: ## %bb.0:
+; AVX-NEXT: vblendps $8, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x08]
+; AVX-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
 ret <4 x float> %res
 }
@@ -27,10 +52,15 @@ declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i32) nounw
 
 
 define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL: test_x86_sse41_dppd:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: dppd $7, %xmm1, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_dppd:
+; SSE: ## %bb.0:
+; SSE-NEXT: dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_dppd:
+; AVX: ## %bb.0:
+; AVX-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@@ -38,10 +68,15 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i32) nounw
 
 
 define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse41_dpps:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: dpps $7, %xmm1, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_dpps:
+; SSE: ## %bb.0:
+; SSE-NEXT: dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_dpps:
+; AVX: ## %bb.0:
+; AVX-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
 ret <4 x float> %res
 }
@@ -49,10 +84,23 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i32) nounwind
 
 
 define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL: test_x86_sse41_insertps:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: insertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3]
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_insertps:
+; SSE: ## %bb.0:
+; SSE-NEXT: insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
+; SSE-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX1-LABEL: test_x86_sse41_insertps:
+; AVX1: ## %bb.0:
+; AVX1-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
+; AVX1-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX512-LABEL: test_x86_sse41_insertps:
+; AVX512: ## %bb.0:
+; AVX512-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
+; AVX512-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
+; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i32 17) ; <<4 x float>> [#uses=1]
 ret <4 x float> %res
 }
@@ -60,11 +108,27 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) noun
 
 
 define <2 x i64> @test_x86_sse41_movntdqa(<2 x i64>* %a0) {
-; CHECK-LABEL: test_x86_sse41_movntdqa:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movntdqa (%eax), %xmm0
-; CHECK-NEXT: retl
+; X86-SSE-LABEL: test_x86_sse41_movntdqa:
+; X86-SSE: ## %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movntdqa (%eax), %xmm0 ## encoding: [0x66,0x0f,0x38,0x2a,0x00]
+; X86-SSE-NEXT: retl ## encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_x86_sse41_movntdqa:
+; X86-AVX: ## %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovntdqa (%eax), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2a,0x00]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse41_movntdqa:
+; X64-SSE: ## %bb.0:
+; X64-SSE-NEXT: movntdqa (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x38,0x2a,0x07]
+; X64-SSE-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse41_movntdqa:
+; X64-AVX: ## %bb.0:
+; X64-AVX-NEXT: vmovntdqa (%rdi), %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2a,0x07]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
 %arg0 = bitcast <2 x i64>* %a0 to i8*
 %res = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %arg0)
 ret <2 x i64> %res
@@ -73,10 +137,15 @@ declare <2 x i64> @llvm.x86.sse41.movntdqa(i8*) nounwind readnone
 
 
 define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
-; CHECK-LABEL: test_x86_sse41_mpsadbw:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: mpsadbw $7, %xmm1, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_mpsadbw:
+; SSE: ## %bb.0:
+; SSE-NEXT: mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_mpsadbw:
+; AVX: ## %bb.0:
+; AVX-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i32 7) ; <<8 x i16>> [#uses=1]
 ret <8 x i16> %res
 }
@@ -84,10 +153,17 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i32) nounwind re
 
 
 define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL: test_x86_sse41_pblendw:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_pblendw:
+; SSE: ## %bb.0:
+; SSE-NEXT: pblendw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0e,0xc1,0x07]
+; SSE-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pblendw:
+; AVX: ## %bb.0:
+; AVX-NEXT: vpblendw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0e,0xc1,0x07]
+; AVX-NEXT: ## xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 7) ; <<8 x i16>> [#uses=1]
 ret <8 x i16> %res
 }
@@ -95,10 +171,15 @@ declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i32) nounwind re
 
 
 define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbd:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: pmovsxbd %xmm0, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_pmovsxbd:
+; SSE: ## %bb.0:
+; SSE-NEXT: pmovsxbd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x21,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxbd:
+; AVX: ## %bb.0:
+; AVX-NEXT: vpmovsxbd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x21,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
 ret <4 x i32> %res
 }
@@ -106,10 +187,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
 
 
 define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbq:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: pmovsxbq %xmm0, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_pmovsxbq:
+; SSE: ## %bb.0:
+; SSE-NEXT: pmovsxbq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x22,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxbq:
+; AVX: ## %bb.0:
+; AVX-NEXT: vpmovsxbq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x22,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -117,10 +203,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8>) nounwind readnone
 
 
 define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxbw:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: pmovsxbw %xmm0, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_pmovsxbw:
+; SSE: ## %bb.0:
+; SSE-NEXT: pmovsxbw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x20,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxbw:
+; AVX: ## %bb.0:
+; AVX-NEXT: vpmovsxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x20,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
 ret <8 x i16> %res
 }
@@ -128,10 +219,15 @@ declare <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8>) nounwind readnone
 
 
 define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxdq:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: pmovsxdq %xmm0, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_pmovsxdq:
+; SSE: ## %bb.0:
+; SSE-NEXT: pmovsxdq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x25,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxdq:
+; AVX: ## %bb.0:
+; AVX-NEXT: vpmovsxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x25,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@@ -139,10 +235,15 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32>) nounwind readnone
 
 
 define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxwd:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: pmovsxwd %xmm0, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_pmovsxwd:
+; SSE: ## %bb.0:
+; SSE-NEXT: pmovsxwd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x23,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxwd:
+; AVX: ## %bb.0:
+; AVX-NEXT: vpmovsxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x23,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
 ret <4 x i32> %res
 }
@@ -150,10 +251,15 @@ declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
 
 
 define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) {
-; CHECK-LABEL: test_x86_sse41_pmovsxwq:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: pmovsxwq %xmm0, %xmm0
-; CHECK-NEXT: retl
+; SSE-LABEL: test_x86_sse41_pmovsxwq:
+; SSE: ## %bb.0:
+; SSE-NEXT: pmovsxwq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x24,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
+;
+; AVX-LABEL: test_x86_sse41_pmovsxwq:
+; AVX: ## %bb.0:
+; AVX-NEXT: vpmovsxwq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x24,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %res
 }
@ -161,10 +267,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16>) nounwind readnone
|
||||||
|
|
||||||
|
|
||||||
define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) {
|
define <4 x i32> @test_x86_sse41_pmovzxbd(<16 x i8> %a0) {
|
||||||
; CHECK-LABEL: test_x86_sse41_pmovzxbd:
|
; SSE-LABEL: test_x86_sse41_pmovzxbd:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
; SSE-NEXT: pmovzxbd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x31,0xc0]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
||||||
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: test_x86_sse41_pmovzxbd:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmovzxbd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x31,0xc0]
|
||||||
|
; AVX-NEXT: ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
|
%res = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
|
||||||
ret <4 x i32> %res
|
ret <4 x i32> %res
|
||||||
}
|
}
|
||||||
|
@ -172,10 +285,17 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8>) nounwind readnone
|
||||||
|
|
||||||
|
|
||||||
define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) {
|
define <2 x i64> @test_x86_sse41_pmovzxbq(<16 x i8> %a0) {
|
||||||
; CHECK-LABEL: test_x86_sse41_pmovzxbq:
|
; SSE-LABEL: test_x86_sse41_pmovzxbq:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
; SSE-NEXT: pmovzxbq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x32,0xc0]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
||||||
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: test_x86_sse41_pmovzxbq:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmovzxbq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x32,0xc0]
|
||||||
|
; AVX-NEXT: ## xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
|
%res = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
|
||||||
ret <2 x i64> %res
|
ret <2 x i64> %res
|
||||||
}
|
}
|
||||||
|
@ -183,10 +303,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
|
||||||
|
|
||||||
|
|
||||||
define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) {
|
define <8 x i16> @test_x86_sse41_pmovzxbw(<16 x i8> %a0) {
|
||||||
; CHECK-LABEL: test_x86_sse41_pmovzxbw:
|
; SSE-LABEL: test_x86_sse41_pmovzxbw:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
; SSE-NEXT: pmovzxbw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x30,0xc0]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
||||||
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: test_x86_sse41_pmovzxbw:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmovzxbw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x30,0xc0]
|
||||||
|
; AVX-NEXT: ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
|
%res = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
|
||||||
ret <8 x i16> %res
|
ret <8 x i16> %res
|
||||||
}
|
}
|
||||||
|
@ -194,10 +321,17 @@ declare <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8>) nounwind readnone
|
||||||
|
|
||||||
|
|
||||||
define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) {
|
define <2 x i64> @test_x86_sse41_pmovzxdq(<4 x i32> %a0) {
|
||||||
; CHECK-LABEL: test_x86_sse41_pmovzxdq:
|
; SSE-LABEL: test_x86_sse41_pmovzxdq:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
|
; SSE-NEXT: pmovzxdq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x35,0xc0]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,xmm0[1],zero
|
||||||
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: test_x86_sse41_pmovzxdq:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmovzxdq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x35,0xc0]
|
||||||
|
; AVX-NEXT: ## xmm0 = xmm0[0],zero,xmm0[1],zero
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
|
%res = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
|
||||||
ret <2 x i64> %res
|
ret <2 x i64> %res
|
||||||
}
|
}
|
||||||
|
@ -205,10 +339,17 @@ declare <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32>) nounwind readnone
|
||||||
|
|
||||||
|
|
||||||
define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) {
|
define <4 x i32> @test_x86_sse41_pmovzxwd(<8 x i16> %a0) {
|
||||||
; CHECK-LABEL: test_x86_sse41_pmovzxwd:
|
; SSE-LABEL: test_x86_sse41_pmovzxwd:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
|
; SSE-NEXT: pmovzxwd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x33,0xc0]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
|
||||||
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: test_x86_sse41_pmovzxwd:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmovzxwd %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x33,0xc0]
|
||||||
|
; AVX-NEXT: ## xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
|
%res = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
|
||||||
ret <4 x i32> %res
|
ret <4 x i32> %res
|
||||||
}
|
}
|
||||||
|
@ -216,90 +357,137 @@ declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
|
||||||
|
|
||||||
|
|
||||||
define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) {
|
define <2 x i64> @test_x86_sse41_pmovzxwq(<8 x i16> %a0) {
|
||||||
; CHECK-LABEL: test_x86_sse41_pmovzxwq:
|
; SSE-LABEL: test_x86_sse41_pmovzxwq:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
|
; SSE-NEXT: pmovzxwq %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x34,0xc0]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
|
||||||
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: test_x86_sse41_pmovzxwq:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmovzxwq %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x34,0xc0]
|
||||||
|
; AVX-NEXT: ## xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
|
%res = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
|
||||||
ret <2 x i64> %res
|
ret <2 x i64> %res
|
||||||
}
|
}
|
||||||
declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
|
declare <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16>) nounwind readnone
|
||||||
|
|
||||||
define <16 x i8> @max_epi8(<16 x i8> %a0, <16 x i8> %a1) {
|
define <16 x i8> @max_epi8(<16 x i8> %a0, <16 x i8> %a1) {
|
||||||
; CHECK-LABEL: max_epi8:
|
; SSE-LABEL: max_epi8:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmaxsb %xmm1, %xmm0
|
; SSE-NEXT: pmaxsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3c,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: max_epi8:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
|
%res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
|
||||||
ret <16 x i8> %res
|
ret <16 x i8> %res
|
||||||
}
|
}
|
||||||
declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
|
declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone
|
||||||
|
|
||||||
define <16 x i8> @min_epi8(<16 x i8> %a0, <16 x i8> %a1) {
|
define <16 x i8> @min_epi8(<16 x i8> %a0, <16 x i8> %a1) {
|
||||||
; CHECK-LABEL: min_epi8:
|
; SSE-LABEL: min_epi8:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pminsb %xmm1, %xmm0
|
; SSE-NEXT: pminsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x38,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: min_epi8:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
|
%res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
|
||||||
ret <16 x i8> %res
|
ret <16 x i8> %res
|
||||||
}
|
}
|
||||||
declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
|
declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone
|
||||||
|
|
||||||
define <8 x i16> @max_epu16(<8 x i16> %a0, <8 x i16> %a1) {
|
define <8 x i16> @max_epu16(<8 x i16> %a0, <8 x i16> %a1) {
|
||||||
; CHECK-LABEL: max_epu16:
|
; SSE-LABEL: max_epu16:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmaxuw %xmm1, %xmm0
|
; SSE-NEXT: pmaxuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3e,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: max_epu16:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
|
%res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
|
||||||
ret <8 x i16> %res
|
ret <8 x i16> %res
|
||||||
}
|
}
|
||||||
declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
|
declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone
|
||||||
|
|
||||||
define <8 x i16> @min_epu16(<8 x i16> %a0, <8 x i16> %a1) {
|
define <8 x i16> @min_epu16(<8 x i16> %a0, <8 x i16> %a1) {
|
||||||
; CHECK-LABEL: min_epu16:
|
; SSE-LABEL: min_epu16:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pminuw %xmm1, %xmm0
|
; SSE-NEXT: pminuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3a,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: min_epu16:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
|
%res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
|
||||||
ret <8 x i16> %res
|
ret <8 x i16> %res
|
||||||
}
|
}
|
||||||
declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
|
declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone
|
||||||
|
|
||||||
define <4 x i32> @max_epi32(<4 x i32> %a0, <4 x i32> %a1) {
|
define <4 x i32> @max_epi32(<4 x i32> %a0, <4 x i32> %a1) {
|
||||||
; CHECK-LABEL: max_epi32:
|
; SSE-LABEL: max_epi32:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmaxsd %xmm1, %xmm0
|
; SSE-NEXT: pmaxsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3d,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: max_epi32:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
|
%res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
|
||||||
ret <4 x i32> %res
|
ret <4 x i32> %res
|
||||||
}
|
}
|
||||||
declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
|
declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
|
||||||
|
|
||||||
define <4 x i32> @min_epi32(<4 x i32> %a0, <4 x i32> %a1) {
|
define <4 x i32> @min_epi32(<4 x i32> %a0, <4 x i32> %a1) {
|
||||||
; CHECK-LABEL: min_epi32:
|
; SSE-LABEL: min_epi32:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pminsd %xmm1, %xmm0
|
; SSE-NEXT: pminsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x39,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: min_epi32:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
|
%res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
|
||||||
ret <4 x i32> %res
|
ret <4 x i32> %res
|
||||||
}
|
}
|
||||||
declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
|
declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone
|
||||||
|
|
||||||
define <4 x i32> @max_epu32(<4 x i32> %a0, <4 x i32> %a1) {
|
define <4 x i32> @max_epu32(<4 x i32> %a0, <4 x i32> %a1) {
|
||||||
; CHECK-LABEL: max_epu32:
|
; SSE-LABEL: max_epu32:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmaxud %xmm1, %xmm0
|
; SSE-NEXT: pmaxud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3f,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: max_epu32:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
|
%res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
|
||||||
ret <4 x i32> %res
|
ret <4 x i32> %res
|
||||||
}
|
}
|
||||||
declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
|
declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone
|
||||||
|
|
||||||
define <4 x i32> @min_epu32(<4 x i32> %a0, <4 x i32> %a1) {
|
define <4 x i32> @min_epu32(<4 x i32> %a0, <4 x i32> %a1) {
|
||||||
; CHECK-LABEL: min_epu32:
|
; SSE-LABEL: min_epu32:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pminud %xmm1, %xmm0
|
; SSE-NEXT: pminud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3b,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: min_epu32:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
|
%res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
|
||||||
ret <4 x i32> %res
|
ret <4 x i32> %res
|
||||||
}
|
}
|
||||||
|
@ -307,10 +495,15 @@ declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone
|
||||||
|
|
||||||
|
|
||||||
define <2 x i64> @test_x86_sse41_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
|
define <2 x i64> @test_x86_sse41_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
|
||||||
; CHECK-LABEL: test_x86_sse41_pmuldq:
|
; SSE-LABEL: test_x86_sse41_pmuldq:
|
||||||
; CHECK: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; CHECK-NEXT: pmuldq %xmm1, %xmm0
|
; SSE-NEXT: pmuldq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x28,0xc1]
|
||||||
; CHECK-NEXT: retl
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; AVX-LABEL: test_x86_sse41_pmuldq:
|
||||||
|
; AVX: ## %bb.0:
|
||||||
|
; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x28,0xc1]
|
||||||
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
|
%res = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
|
||||||
ret <2 x i64> %res
|
ret <2 x i64> %res
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,21 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefix=SSE41
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
 
 define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
-; SSE41-LABEL: test_x86_sse41_blendvpd:
-; SSE41: ## %bb.0:
-; SSE41-NEXT: movapd %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x28,0xd8]
-; SSE41-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
-; SSE41-NEXT: movapd %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x28,0xc3]
-; SSE41-NEXT: retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse41_blendvpd:
+; SSE: ## %bb.0:
+; SSE-NEXT: movapd %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x28,0xd8]
+; SSE-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
+; SSE-NEXT: blendvpd %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
+; SSE-NEXT: movapd %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x28,0xc3]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse41_blendvpd:
-; VCHECK: ## %bb.0:
-; VCHECK-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4b,0xc1,0x20]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse41_blendvpd:
+; AVX: ## %bb.0:
+; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4b,0xc1,0x20]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) ; <<2 x double>> [#uses=1]
 ret <2 x double> %res
 }
@ -23,18 +26,18 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
|
||||||
|
|
||||||
|
|
||||||
define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
|
define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
|
||||||
; SSE41-LABEL: test_x86_sse41_blendvps:
|
; SSE-LABEL: test_x86_sse41_blendvps:
|
||||||
; SSE41: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE41-NEXT: movaps %xmm0, %xmm3 ## encoding: [0x0f,0x28,0xd8]
|
; SSE-NEXT: movaps %xmm0, %xmm3 ## encoding: [0x0f,0x28,0xd8]
|
||||||
; SSE41-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
|
; SSE-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
|
||||||
; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
|
; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
|
||||||
; SSE41-NEXT: movaps %xmm3, %xmm0 ## encoding: [0x0f,0x28,0xc3]
|
; SSE-NEXT: movaps %xmm3, %xmm0 ## encoding: [0x0f,0x28,0xc3]
|
||||||
; SSE41-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse41_blendvps:
|
; AVX-LABEL: test_x86_sse41_blendvps:
|
||||||
; VCHECK: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4a,0xc1,0x20]
|
; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4a,0xc1,0x20]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) ; <<4 x float>> [#uses=1]
|
%res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) ; <<4 x float>> [#uses=1]
|
||||||
ret <4 x float> %res
|
ret <4 x float> %res
|
||||||
}
|
}
|
||||||
|
@ -42,15 +45,15 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa
|
||||||
|
|
||||||
|
|
||||||
define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
|
define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) {
|
||||||
; SSE41-LABEL: test_x86_sse41_dppd:
|
; SSE-LABEL: test_x86_sse41_dppd:
|
||||||
; SSE41: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE41-NEXT: dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
|
; SSE-NEXT: dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
|
||||||
; SSE41-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse41_dppd:
|
; AVX-LABEL: test_x86_sse41_dppd:
|
||||||
; VCHECK: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
|
; AVX-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
|
%res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
|
||||||
ret <2 x double> %res
|
ret <2 x double> %res
|
||||||
}
|
}
|
||||||
|
@ -58,15 +61,15 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi
|
||||||
|
|
||||||
|
|
||||||
define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
|
define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) {
|
||||||
; SSE41-LABEL: test_x86_sse41_dpps:
|
; SSE-LABEL: test_x86_sse41_dpps:
|
||||||
; SSE41: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE41-NEXT: dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
|
; SSE-NEXT: dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
|
||||||
; SSE41-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse41_dpps:
|
; AVX-LABEL: test_x86_sse41_dpps:
|
||||||
; VCHECK: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
|
; AVX-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
|
%res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
|
||||||
ret <4 x float> %res
|
ret <4 x float> %res
|
||||||
}
|
}
|
||||||
|
@ -74,23 +77,23 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind
|
||||||
|
|
||||||
|
|
||||||
define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
|
define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) {
|
||||||
; SSE41-LABEL: test_x86_sse41_insertps:
|
; SSE-LABEL: test_x86_sse41_insertps:
|
||||||
; SSE41: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE41-NEXT: insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
|
; SSE-NEXT: insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
|
||||||
; SSE41-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
|
; SSE-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
|
||||||
; SSE41-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; AVX2-LABEL: test_x86_sse41_insertps:
|
; AVX1-LABEL: test_x86_sse41_insertps:
|
||||||
; AVX2: ## %bb.0:
|
; AVX1: ## %bb.0:
|
||||||
; AVX2-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
|
; AVX1-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
|
||||||
; AVX2-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
|
; AVX1-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
|
||||||
; AVX2-NEXT: retl ## encoding: [0xc3]
|
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; SKX-LABEL: test_x86_sse41_insertps:
|
; AVX512-LABEL: test_x86_sse41_insertps:
|
||||||
; SKX: ## %bb.0:
|
; AVX512: ## %bb.0:
|
||||||
; SKX-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
|
; AVX512-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
|
||||||
; SKX-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
|
; AVX512-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
|
||||||
; SKX-NEXT: retl ## encoding: [0xc3]
|
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17) ; <<4 x float>> [#uses=1]
|
%res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17) ; <<4 x float>> [#uses=1]
|
||||||
ret <4 x float> %res
|
ret <4 x float> %res
|
||||||
}
|
}
|
||||||
|
@ -99,15 +102,15 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw
|
||||||
|
|
||||||
|
|
||||||
define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
|
define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
|
||||||
; SSE41-LABEL: test_x86_sse41_mpsadbw:
|
; SSE-LABEL: test_x86_sse41_mpsadbw:
|
||||||
; SSE41: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE41-NEXT: mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
|
; SSE-NEXT: mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
|
||||||
; SSE41-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse41_mpsadbw:
|
; AVX-LABEL: test_x86_sse41_mpsadbw:
|
||||||
; VCHECK: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
|
; AVX-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
|
%res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
|
||||||
ret <8 x i16> %res
|
ret <8 x i16> %res
|
||||||
}
|
}
|
||||||
|
@ -115,20 +118,15 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind rea
|
||||||
|
|
||||||
|
|
||||||
define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
|
define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
|
||||||
; SSE41-LABEL: test_x86_sse41_packusdw:
|
; SSE-LABEL: test_x86_sse41_packusdw:
|
||||||
; SSE41: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE41-NEXT: packusdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x2b,0xc1]
|
; SSE-NEXT: packusdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x2b,0xc1]
|
||||||
; SSE41-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; AVX2-LABEL: test_x86_sse41_packusdw:
|
; AVX-LABEL: test_x86_sse41_packusdw:
|
||||||
; AVX2: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
|
; AVX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
|
||||||
; AVX2-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
|
||||||
; SKX-LABEL: test_x86_sse41_packusdw:
|
|
||||||
; SKX: ## %bb.0:
|
|
||||||
; SKX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
|
|
||||||
; SKX-NEXT: retl ## encoding: [0xc3]
|
|
||||||
%res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
|
%res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
|
||||||
ret <8 x i16> %res
|
ret <8 x i16> %res
|
||||||
}
|
}
|
||||||
|
@ -136,44 +134,51 @@ declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readno
|
||||||
|
|
||||||
|
|
||||||
define <8 x i16> @test_x86_sse41_packusdw_fold() {
|
define <8 x i16> @test_x86_sse41_packusdw_fold() {
|
||||||
; SSE41-LABEL: test_x86_sse41_packusdw_fold:
|
; X86-SSE-LABEL: test_x86_sse41_packusdw_fold:
|
||||||
; SSE41: ## %bb.0:
|
; X86-SSE: ## %bb.0:
|
||||||
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
|
; X86-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
|
||||||
; SSE41-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
|
; X86-SSE-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
|
||||||
; SSE41-NEXT: ## fixup A - offset: 3, value: LCPI7_0, kind: FK_Data_4
|
; X86-SSE-NEXT: ## fixup A - offset: 3, value: LCPI7_0, kind: FK_Data_4
|
||||||
; SSE41-NEXT: retl ## encoding: [0xc3]
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; AVX2-LABEL: test_x86_sse41_packusdw_fold:
|
; X86-AVX-LABEL: test_x86_sse41_packusdw_fold:
|
||||||
; AVX2: ## %bb.0:
|
; X86-AVX: ## %bb.0:
|
||||||
; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
|
; X86-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
|
||||||
; AVX2-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
|
; X86-AVX-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
|
||||||
; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
|
; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
|
||||||
; AVX2-NEXT: retl ## encoding: [0xc3]
|
; X86-AVX-NEXT: retl ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; SKX-LABEL: test_x86_sse41_packusdw_fold:
|
; X64-SSE-LABEL: test_x86_sse41_packusdw_fold:
|
||||||
; SKX: ## %bb.0:
|
; X64-SSE: ## %bb.0:
|
||||||
; SKX-NEXT: vmovaps LCPI7_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [0,0,0,0,65535,65535,0,0]
|
; X64-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
|
||||||
; SKX-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
|
; X64-SSE-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
|
||||||
; SKX-NEXT: ## fixup A - offset: 4, value: LCPI7_0, kind: FK_Data_4
|
; X64-SSE-NEXT: ## fixup A - offset: 3, value: LCPI7_0-4, kind: reloc_riprel_4byte
|
||||||
; SKX-NEXT: retl ## encoding: [0xc3]
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-AVX-LABEL: test_x86_sse41_packusdw_fold:
|
||||||
|
; X64-AVX: ## %bb.0:
|
||||||
|
; X64-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
|
||||||
|
; X64-AVX-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
|
||||||
|
; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI7_0-4, kind: reloc_riprel_4byte
|
||||||
|
; X64-AVX-NEXT: retq ## encoding: [0xc3]
|
||||||
%res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
|
%res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
|
||||||
ret <8 x i16> %res
|
ret <8 x i16> %res
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; SSE-LABEL: test_x86_sse41_pblendvb:
; SSE: ## %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd8]
; SSE-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE-NEXT: pblendvb %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x10,0xd9]
; SSE-NEXT: movdqa %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pblendvb:
; AVX: ## %bb.0:
; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4c,0xc1,0x20]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}

@@ -181,15 +186,15 @@ declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) noun

define <8 x i16> @test_x86_sse41_phminposuw(<8 x i16> %a0) {
; SSE-LABEL: test_x86_sse41_phminposuw:
; SSE: ## %bb.0:
; SSE-NEXT: phminposuw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x41,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_phminposuw:
; AVX: ## %bb.0:
; AVX-NEXT: vphminposuw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x41,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
}

@@ -197,20 +202,15 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone

define <16 x i8> @test_x86_sse41_pmaxsb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse41_pmaxsb:
; SSE: ## %bb.0:
; SSE-NEXT: pmaxsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3c,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pmaxsb:
; AVX: ## %bb.0:
; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3c,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}

@@ -218,20 +218,15 @@ declare <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8>, <16 x i8>) nounwind readnone

define <4 x i32> @test_x86_sse41_pmaxsd(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse41_pmaxsd:
; SSE: ## %bb.0:
; SSE-NEXT: pmaxsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3d,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pmaxsd:
; AVX: ## %bb.0:
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3d,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %res
}

@@ -239,20 +234,15 @@ declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone

define <4 x i32> @test_x86_sse41_pmaxud(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse41_pmaxud:
; SSE: ## %bb.0:
; SSE-NEXT: pmaxud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3f,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pmaxud:
; AVX: ## %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3f,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %res
}

@@ -260,20 +250,15 @@ declare <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32>, <4 x i32>) nounwind readnone

define <8 x i16> @test_x86_sse41_pmaxuw(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse41_pmaxuw:
; SSE: ## %bb.0:
; SSE-NEXT: pmaxuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3e,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pmaxuw:
; AVX: ## %bb.0:
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
}

@@ -281,20 +266,15 @@ declare <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i8> @test_x86_sse41_pminsb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse41_pminsb:
; SSE: ## %bb.0:
; SSE-NEXT: pminsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x38,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pminsb:
; AVX: ## %bb.0:
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x38,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}

@@ -302,20 +282,15 @@ declare <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8>, <16 x i8>) nounwind readnone

define <4 x i32> @test_x86_sse41_pminsd(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse41_pminsd:
; SSE: ## %bb.0:
; SSE-NEXT: pminsd %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x39,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pminsd:
; AVX: ## %bb.0:
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x39,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %res
}

@@ -323,20 +298,15 @@ declare <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32>, <4 x i32>) nounwind readnone

define <4 x i32> @test_x86_sse41_pminud(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse41_pminud:
; SSE: ## %bb.0:
; SSE-NEXT: pminud %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3b,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pminud:
; AVX: ## %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3b,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
ret <4 x i32> %res
}

@@ -344,20 +314,15 @@ declare <4 x i32> @llvm.x86.sse41.pminud(<4 x i32>, <4 x i32>) nounwind readnone

define <8 x i16> @test_x86_sse41_pminuw(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse41_pminuw:
; SSE: ## %bb.0:
; SSE-NEXT: pminuw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x3a,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_pminuw:
; AVX: ## %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3a,0xc1]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
}

@@ -365,19 +330,19 @@ declare <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16>, <8 x i16>) nounwind readnone

define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_sse41_ptestc:
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_ptestc:
; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
}

@@ -385,19 +350,19 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone

define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_sse41_ptestnzc:
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_ptestnzc:
; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
}

@@ -405,19 +370,19 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone

define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_x86_sse41_ptestz:
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_ptestz:
; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
ret i32 %res
}

@@ -425,20 +390,15 @@ declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone

define <2 x double> @test_x86_sse41_round_pd(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse41_round_pd:
; SSE: ## %bb.0:
; SSE-NEXT: roundpd $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x09,0xc0,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_round_pd:
; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}

@@ -446,20 +406,15 @@ declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readno

define <4 x float> @test_x86_sse41_round_ps(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse41_round_ps:
; SSE: ## %bb.0:
; SSE-NEXT: roundps $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x08,0xc0,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse41_round_ps:
; AVX: ## %bb.0:
; AVX-NEXT: vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}

@@ -467,20 +422,20 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone

define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse41_round_sd:
; SSE: ## %bb.0:
; SSE-NEXT: roundsd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0xc1,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_round_sd:
; AVX1: ## %bb.0:
; AVX1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_round_sd:
; AVX512: ## %bb.0:
; AVX512-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}

@@ -488,23 +443,38 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n

define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, <2 x double>* %a1) {
; X86-SSE-LABEL: test_x86_sse41_round_sd_load:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT: roundsd $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x00,0x07]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse41_round_sd_load:
; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vroundsd $7, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse41_round_sd_load:
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vroundsd $7, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse41_round_sd_load:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: roundsd $7, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x07,0x07]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse41_round_sd_load:
; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x07,0x07]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse41_round_sd_load:
; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x07,0x07]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
%a1b = load <2 x double>, <2 x double>* %a1
%res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1b, i32 7) ; <<2 x double>> [#uses=1]
ret <2 x double> %res

@@ -512,20 +482,20 @@ define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, <2 x double>

define <4 x float> @test_x86_sse41_round_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse41_round_ss:
; SSE: ## %bb.0:
; SSE-NEXT: roundss $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0a,0xc1,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_round_ss:
; AVX1: ## %bb.0:
; AVX1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_round_ss:
; AVX512: ## %bb.0:
; AVX512-NEXT: vroundss $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}

File diff suppressed because it is too large
@@ -1,25 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse42-builtins.c

define i64 @test_mm_crc64_u8(i64 %a0, i8 %a1) nounwind{
; CHECK-LABEL: test_mm_crc64_u8:
; CHECK: # %bb.0:
; CHECK-NEXT: crc32b %sil, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a0, i8 %a1)
ret i64 %res
}
declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind readnone

define i64 @test_mm_crc64_u64(i64 %a0, i64 %a1) nounwind{
; CHECK-LABEL: test_mm_crc64_u64:
; CHECK: # %bb.0:
; CHECK-NEXT: crc32q %rsi, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a0, i64 %a1)
ret i64 %res
}

@@ -1,31 +1,57 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse42-builtins.c

define i32 @test_mm_cmpestra(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_cmpestra:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pushl %ebx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: xorl %ebx, %ebx
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X86-SSE-NEXT: seta %bl
; X86-SSE-NEXT: movl %ebx, %eax
; X86-SSE-NEXT: popl %ebx
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: test_mm_cmpestra:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: xorl %ebx, %ebx
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X86-AVX-NEXT: seta %bl
; X86-AVX-NEXT: movl %ebx, %eax
; X86-AVX-NEXT: popl %ebx
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_mm_cmpestra:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %r8d, %r8d
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: movl %esi, %edx
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X64-SSE-NEXT: seta %r8b
; X64-SSE-NEXT: movl %r8d, %eax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_mm_cmpestra:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: xorl %r8d, %r8d
; X64-AVX-NEXT: movl %edi, %eax
; X64-AVX-NEXT: movl %esi, %edx
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X64-AVX-NEXT: seta %r8b
; X64-AVX-NEXT: movl %r8d, %eax
; X64-AVX-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg2 = bitcast <2 x i64> %a2 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpestria128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)

@@ -34,27 +60,49 @@ define i32 @test_mm_cmpestra(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
declare i32 @llvm.x86.sse42.pcmpestria128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone

define i32 @test_mm_cmpestrc(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_cmpestrc:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pushl %ebx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: xorl %ebx, %ebx
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X86-SSE-NEXT: setb %bl
; X86-SSE-NEXT: movl %ebx, %eax
; X86-SSE-NEXT: popl %ebx
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: test_mm_cmpestrc:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: xorl %ebx, %ebx
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X86-AVX-NEXT: setb %bl
; X86-AVX-NEXT: movl %ebx, %eax
; X86-AVX-NEXT: popl %ebx
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_mm_cmpestrc:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %r8d, %r8d
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: movl %esi, %edx
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X64-SSE-NEXT: setb %r8b
; X64-SSE-NEXT: movl %r8d, %eax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_mm_cmpestrc:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: xorl %r8d, %r8d
; X64-AVX-NEXT: movl %edi, %eax
; X64-AVX-NEXT: movl %esi, %edx
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X64-AVX-NEXT: setb %r8b
; X64-AVX-NEXT: movl %r8d, %eax
; X64-AVX-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg2 = bitcast <2 x i64> %a2 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpestric128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)

@@ -63,21 +111,37 @@ define i32 @test_mm_cmpestrc(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
declare i32 @llvm.x86.sse42.pcmpestric128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone

define i32 @test_mm_cmpestri(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
; X86-SSE-LABEL: test_mm_cmpestri:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X86-SSE-NEXT: movl %ecx, %eax
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: test_mm_cmpestri:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X86-AVX-NEXT: movl %ecx, %eax
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_mm_cmpestri:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: movl %esi, %edx
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X64-SSE-NEXT: movl %ecx, %eax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_mm_cmpestri:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl %edi, %eax
; X64-AVX-NEXT: movl %esi, %edx
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X64-AVX-NEXT: movl %ecx, %eax
; X64-AVX-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg2 = bitcast <2 x i64> %a2 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)

@@ -86,19 +150,33 @@ define i32 @test_mm_cmpestri(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone

define <2 x i64> @test_mm_cmpestrm(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) {
; X86-SSE-LABEL: test_mm_cmpestrm:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: pcmpestrm $7, %xmm1, %xmm0
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: test_mm_cmpestrm:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vpcmpestrm $7, %xmm1, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_mm_cmpestrm:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: movl %esi, %edx
; X64-SSE-NEXT: pcmpestrm $7, %xmm1, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_mm_cmpestrm:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: movl %edi, %eax
; X64-AVX-NEXT: movl %esi, %edx
; X64-AVX-NEXT: vpcmpestrm $7, %xmm1, %xmm0
; X64-AVX-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg2 = bitcast <2 x i64> %a2 to <16 x i8>
%res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)

@@ -108,27 +186,49 @@ define <2 x i64> @test_mm_cmpestrm(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a
declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone

define i32 @test_mm_cmpestro(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_cmpestro:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pushl %ebx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: xorl %ebx, %ebx
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X86-SSE-NEXT: seto %bl
; X86-SSE-NEXT: movl %ebx, %eax
; X86-SSE-NEXT: popl %ebx
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: test_mm_cmpestro:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: xorl %ebx, %ebx
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X86-AVX-NEXT: seto %bl
; X86-AVX-NEXT: movl %ebx, %eax
; X86-AVX-NEXT: popl %ebx
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_mm_cmpestro:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %r8d, %r8d
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: movl %esi, %edx
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X64-SSE-NEXT: seto %r8b
; X64-SSE-NEXT: movl %r8d, %eax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_mm_cmpestro:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: xorl %r8d, %r8d
; X64-AVX-NEXT: movl %edi, %eax
; X64-AVX-NEXT: movl %esi, %edx
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X64-AVX-NEXT: seto %r8b
; X64-AVX-NEXT: movl %r8d, %eax
; X64-AVX-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg2 = bitcast <2 x i64> %a2 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)

@@ -137,27 +237,49 @@ define i32 @test_mm_cmpestro(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
declare i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone

define i32 @test_mm_cmpestrs(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_cmpestrs:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pushl %ebx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: xorl %ebx, %ebx
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X86-SSE-NEXT: sets %bl
; X86-SSE-NEXT: movl %ebx, %eax
; X86-SSE-NEXT: popl %ebx
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: test_mm_cmpestrs:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: xorl %ebx, %ebx
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X86-AVX-NEXT: sets %bl
; X86-AVX-NEXT: movl %ebx, %eax
; X86-AVX-NEXT: popl %ebx
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_mm_cmpestrs:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %r8d, %r8d
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: movl %esi, %edx
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X64-SSE-NEXT: sets %r8b
; X64-SSE-NEXT: movl %r8d, %eax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_mm_cmpestrs:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: xorl %r8d, %r8d
; X64-AVX-NEXT: movl %edi, %eax
; X64-AVX-NEXT: movl %esi, %edx
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X64-AVX-NEXT: sets %r8b
; X64-AVX-NEXT: movl %r8d, %eax
; X64-AVX-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg2 = bitcast <2 x i64> %a2 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpestris128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)

@@ -166,27 +288,49 @@ define i32 @test_mm_cmpestrs(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
declare i32 @llvm.x86.sse42.pcmpestris128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone

define i32 @test_mm_cmpestrz(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_cmpestrz:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pushl %ebx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: xorl %ebx, %ebx
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X86-SSE-NEXT: sete %bl
; X86-SSE-NEXT: movl %ebx, %eax
; X86-SSE-NEXT: popl %ebx
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: test_mm_cmpestrz:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: xorl %ebx, %ebx
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X86-AVX-NEXT: sete %bl
; X86-AVX-NEXT: movl %ebx, %eax
; X86-AVX-NEXT: popl %ebx
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: test_mm_cmpestrz:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorl %r8d, %r8d
; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: movl %esi, %edx
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0
; X64-SSE-NEXT: sete %r8b
; X64-SSE-NEXT: movl %r8d, %eax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: test_mm_cmpestrz:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: xorl %r8d, %r8d
; X64-AVX-NEXT: movl %edi, %eax
; X64-AVX-NEXT: movl %esi, %edx
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0
; X64-AVX-NEXT: sete %r8b
; X64-AVX-NEXT: movl %r8d, %eax
; X64-AVX-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg2 = bitcast <2 x i64> %a2 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8> %arg0, i32 %a1, <16 x i8> %arg2, i32 %a3, i8 7)

@@ -195,22 +339,34 @@ define i32 @test_mm_cmpestrz(<2 x i64> %a0, i32 %a1, <2 x i64> %a2, i32 %a3) nou
declare i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone

define <2 x i64> @test_mm_cmpgt_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_cmpgt_epi64:
; SSE: # %bb.0:
; SSE-NEXT: pcmpgtq %xmm1, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: test_mm_cmpgt_epi64:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%cmp = icmp sgt <2 x i64> %a0, %a1
%res = sext <2 x i1> %cmp to <2 x i64>
ret <2 x i64> %res
}

define i32 @test_mm_cmpistra(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_cmpistra:
; SSE: # %bb.0:
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0
; SSE-NEXT: seta %al
; SSE-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: test_mm_cmpistra:
; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0
; AVX-NEXT: seta %al
; AVX-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpistria128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)

@@ -219,12 +375,19 @@ define i32 @test_mm_cmpistra(<2 x i64> %a0, <2 x i64> %a1) {
declare i32 @llvm.x86.sse42.pcmpistria128(<16 x i8>, <16 x i8>, i8) nounwind readnone

define i32 @test_mm_cmpistrc(<2 x i64> %a0, <2 x i64> %a1) {
|
define i32 @test_mm_cmpistrc(<2 x i64> %a0, <2 x i64> %a1) {
|
||||||
; ALL-LABEL: test_mm_cmpistrc:
|
; SSE-LABEL: test_mm_cmpistrc:
|
||||||
; ALL: # %bb.0:
|
; SSE: # %bb.0:
|
||||||
; ALL-NEXT: xorl %eax, %eax
|
; SSE-NEXT: xorl %eax, %eax
|
||||||
; ALL-NEXT: pcmpistri $7, %xmm1, %xmm0
|
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0
|
||||||
; ALL-NEXT: setb %al
|
; SSE-NEXT: setb %al
|
||||||
; ALL-NEXT: ret{{[l|q]}}
|
; SSE-NEXT: ret{{[l|q]}}
|
||||||
|
;
|
||||||
|
; AVX-LABEL: test_mm_cmpistrc:
|
||||||
|
; AVX: # %bb.0:
|
||||||
|
; AVX-NEXT: xorl %eax, %eax
|
||||||
|
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0
|
||||||
|
; AVX-NEXT: setb %al
|
||||||
|
; AVX-NEXT: ret{{[l|q]}}
|
||||||
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
|
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
|
||||||
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
|
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
|
||||||
%res = call i32 @llvm.x86.sse42.pcmpistric128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
|
%res = call i32 @llvm.x86.sse42.pcmpistric128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
|
||||||
|
@ -233,11 +396,17 @@ define i32 @test_mm_cmpistrc(<2 x i64> %a0, <2 x i64> %a1) {
declare i32 @llvm.x86.sse42.pcmpistric128(<16 x i8>, <16 x i8>, i8) nounwind readnone

define i32 @test_mm_cmpistri(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_cmpistri:
; SSE: # %bb.0:
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0
; SSE-NEXT: movl %ecx, %eax
; SSE-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: test_mm_cmpistri:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0
; AVX-NEXT: movl %ecx, %eax
; AVX-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@ -246,10 +415,15 @@ define i32 @test_mm_cmpistri(<2 x i64> %a0, <2 x i64> %a1) {
declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind readnone

define <2 x i64> @test_mm_cmpistrm(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_cmpistrm:
; SSE: # %bb.0:
; SSE-NEXT: pcmpistrm $7, %xmm1, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: test_mm_cmpistrm:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpistrm $7, %xmm1, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
%res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@ -259,12 +433,19 @@ define <2 x i64> @test_mm_cmpistrm(<2 x i64> %a0, <2 x i64> %a1) {
declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwind readnone

define i32 @test_mm_cmpistro(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_cmpistro:
; SSE: # %bb.0:
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0
; SSE-NEXT: seto %al
; SSE-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: test_mm_cmpistro:
; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0
; AVX-NEXT: seto %al
; AVX-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@ -273,12 +454,19 @@ define i32 @test_mm_cmpistro(<2 x i64> %a0, <2 x i64> %a1) {
declare i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8>, <16 x i8>, i8) nounwind readnone

define i32 @test_mm_cmpistrs(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_cmpistrs:
; SSE: # %bb.0:
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0
; SSE-NEXT: sets %al
; SSE-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: test_mm_cmpistrs:
; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0
; AVX-NEXT: sets %al
; AVX-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpistris128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@ -287,12 +475,19 @@ define i32 @test_mm_cmpistrs(<2 x i64> %a0, <2 x i64> %a1) {
declare i32 @llvm.x86.sse42.pcmpistris128(<16 x i8>, <16 x i8>, i8) nounwind readnone

define i32 @test_mm_cmpistrz(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_cmpistrz:
; SSE: # %bb.0:
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0
; SSE-NEXT: sete %al
; SSE-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: test_mm_cmpistrz:
; AVX: # %bb.0:
; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0
; AVX-NEXT: sete %al
; AVX-NEXT: ret{{[l|q]}}
%arg0 = bitcast <2 x i64> %a0 to <16 x i8>
%arg1 = bitcast <2 x i64> %a1 to <16 x i8>
%res = call i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8> %arg0, <16 x i8> %arg1, i8 7)
@ -301,11 +496,11 @@ define i32 @test_mm_cmpistrz(<2 x i64> %a0, <2 x i64> %a1) {
declare i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8>, <16 x i8>, i8) nounwind readnone

define i32 @test_mm_crc32_u8(i32 %a0, i8 %a1) {
; X86-LABEL: test_mm_crc32_u8:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: crc32b {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mm_crc32_u8:
; X64: # %bb.0:
@ -318,11 +513,11 @@ define i32 @test_mm_crc32_u8(i32 %a0, i8 %a1) {
declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind readnone

define i32 @test_mm_crc32_u16(i32 %a0, i16 %a1) {
; X86-LABEL: test_mm_crc32_u16:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: crc32w {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mm_crc32_u16:
; X64: # %bb.0:
@ -335,11 +530,11 @@ define i32 @test_mm_crc32_u16(i32 %a0, i16 %a1) {
declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind readnone

define i32 @test_mm_crc32_u32(i32 %a0, i32 %a1) {
; X86-LABEL: test_mm_crc32_u32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: crc32l {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mm_crc32_u32:
; X64: # %bb.0:
|
@ -1,24 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512

define i32 @test_x86_sse42_pcmpestri128(<16 x i8> %a0, <16 x i8> %a2) {
; SSE-LABEL: test_x86_sse42_pcmpestri128:
; SSE: ## %bb.0:
; SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
; SSE-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX-LABEL: test_x86_sse42_pcmpestri128:
; AVX: ## %bb.0:
; AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
; AVX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@ -26,38 +29,45 @@ declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nou


define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
; X86-SSE-LABEL: test_x86_sse42_pcmpestri128_load:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT: movdqa (%eax), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x00]
; X86-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X86-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X86-SSE-NEXT: pcmpestri $7, (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0x01,0x07]
; X86-SSE-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX-LABEL: test_x86_sse42_pcmpestri128_load:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x08]
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vmovdqa (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x00]
; X86-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X86-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X86-AVX-NEXT: vpcmpestri $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x01,0x07]
; X86-AVX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse42_pcmpestri128_load:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: movdqa (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x07]
; X64-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X64-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X64-SSE-NEXT: pcmpestri $7, (%rsi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0x06,0x07]
; X64-SSE-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_sse42_pcmpestri128_load:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vmovdqa (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x07]
; X64-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X64-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X64-AVX-NEXT: vpcmpestri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0x06,0x07]
; X64-AVX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
; X64-AVX-NEXT: retq ## encoding: [0xc3]
%1 = load <16 x i8>, <16 x i8>* %a0
%2 = load <16 x i8>, <16 x i8>* %a2
%res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) ; <i32> [#uses=1]
@ -66,29 +76,49 @@ define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {


define i32 @test_x86_sse42_pcmpestria128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
; X86-SSE-LABEL: test_x86_sse42_pcmpestria128:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: pushl %ebx ## encoding: [0x53]
; X86-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X86-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X86-SSE-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
; X86-SSE-NEXT: seta %bl ## encoding: [0x0f,0x97,0xc3]
; X86-SSE-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
; X86-SSE-NEXT: popl %ebx ## encoding: [0x5b]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX-LABEL: test_x86_sse42_pcmpestria128:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: pushl %ebx ## encoding: [0x53]
; X86-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X86-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X86-AVX-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
; X86-AVX-NEXT: seta %bl ## encoding: [0x0f,0x97,0xc3]
; X86-AVX-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
; X86-AVX-NEXT: popl %ebx ## encoding: [0x5b]
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse42_pcmpestria128:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X64-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X64-SSE-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
; X64-SSE-NEXT: seta %sil ## encoding: [0x40,0x0f,0x97,0xc6]
; X64-SSE-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_sse42_pcmpestria128:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X64-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X64-AVX-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
; X64-AVX-NEXT: seta %sil ## encoding: [0x40,0x0f,0x97,0xc6]
; X64-AVX-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
; X64-AVX-NEXT: retq ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse42.pcmpestria128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@ -96,29 +126,49 @@ declare i32 @llvm.x86.sse42.pcmpestria128(<16 x i8>, i32, <16 x i8>, i32, i8) no


define i32 @test_x86_sse42_pcmpestric128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
; X86-SSE-LABEL: test_x86_sse42_pcmpestric128:
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: pushl %ebx ## encoding: [0x53]
; X86-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X86-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X86-SSE-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
; X86-SSE-NEXT: setb %bl ## encoding: [0x0f,0x92,0xc3]
; X86-SSE-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
; X86-SSE-NEXT: popl %ebx ## encoding: [0x5b]
; X86-SSE-NEXT: retl ## encoding: [0xc3]
;
; X86-AVX-LABEL: test_x86_sse42_pcmpestric128:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: pushl %ebx ## encoding: [0x53]
; X86-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X86-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X86-AVX-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
; X86-AVX-NEXT: setb %bl ## encoding: [0x0f,0x92,0xc3]
; X86-AVX-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
; X86-AVX-NEXT: popl %ebx ## encoding: [0x5b]
; X86-AVX-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse42_pcmpestric128:
; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X64-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X64-SSE-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
; X64-SSE-NEXT: setb %sil ## encoding: [0x40,0x0f,0x92,0xc6]
; X64-SSE-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_sse42_pcmpestric128:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
; X64-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
; X64-AVX-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
; X64-AVX-NEXT: setb %sil ## encoding: [0x40,0x0f,0x92,0xc6]
; X64-AVX-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
; X64-AVX-NEXT: retq ## encoding: [0xc3]
%res = call i32 @llvm.x86.sse42.pcmpestric128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@ -126,29 +176,49 @@ declare i32 @llvm.x86.sse42.pcmpestric128(<16 x i8>, i32, <16 x i8>, i32, i8) no
|
||||||
|
|
||||||
|
|
||||||
define i32 @test_x86_sse42_pcmpestrio128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
|
define i32 @test_x86_sse42_pcmpestrio128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpestrio128:
|
; X86-SSE-LABEL: test_x86_sse42_pcmpestrio128:
|
||||||
; SSE42: ## %bb.0:
|
; X86-SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
|
; X86-SSE-NEXT: pushl %ebx ## encoding: [0x53]
|
||||||
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; X86-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; X86-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
; X86-SSE-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
||||||
; SSE42-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
||||||
; SSE42-NEXT: seto %bl ## encoding: [0x0f,0x90,0xc3]
|
; X86-SSE-NEXT: seto %bl ## encoding: [0x0f,0x90,0xc3]
|
||||||
; SSE42-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
; X86-SSE-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
||||||
; SSE42-NEXT: popl %ebx ## encoding: [0x5b]
|
; X86-SSE-NEXT: popl %ebx ## encoding: [0x5b]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse42_pcmpestrio128:
|
; X86-AVX-LABEL: test_x86_sse42_pcmpestrio128:
|
||||||
; VCHECK: ## %bb.0:
|
; X86-AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
|
; X86-AVX-NEXT: pushl %ebx ## encoding: [0x53]
|
||||||
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; X86-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; X86-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
; X86-AVX-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
||||||
; VCHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
||||||
; VCHECK-NEXT: seto %bl ## encoding: [0x0f,0x90,0xc3]
|
; X86-AVX-NEXT: seto %bl ## encoding: [0x0f,0x90,0xc3]
|
||||||
; VCHECK-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
; X86-AVX-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
||||||
; VCHECK-NEXT: popl %ebx ## encoding: [0x5b]
|
; X86-AVX-NEXT: popl %ebx ## encoding: [0x5b]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; X86-AVX-NEXT: retl ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-SSE-LABEL: test_x86_sse42_pcmpestrio128:
|
||||||
|
; X64-SSE: ## %bb.0:
|
||||||
|
; X64-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-SSE-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
|
||||||
|
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
||||||
|
; X64-SSE-NEXT: seto %sil ## encoding: [0x40,0x0f,0x90,0xc6]
|
||||||
|
; X64-SSE-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
|
||||||
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-AVX-LABEL: test_x86_sse42_pcmpestrio128:
|
||||||
|
; X64-AVX: ## %bb.0:
|
||||||
|
; X64-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-AVX-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
|
||||||
|
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
||||||
|
; X64-AVX-NEXT: seto %sil ## encoding: [0x40,0x0f,0x90,0xc6]
|
||||||
|
; X64-AVX-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
|
||||||
|
; X64-AVX-NEXT: retq ## encoding: [0xc3]
|
||||||
%res = call i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
|
%res = call i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
|
||||||
ret i32 %res
|
ret i32 %res
|
||||||
}
|
}
|
||||||
|
@ -156,29 +226,49 @@ declare i32 @llvm.x86.sse42.pcmpestrio128(<16 x i8>, i32, <16 x i8>, i32, i8) no
|
||||||
|
|
||||||
|
|
||||||
define i32 @test_x86_sse42_pcmpestris128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
|
define i32 @test_x86_sse42_pcmpestris128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpestris128:
|
; X86-SSE-LABEL: test_x86_sse42_pcmpestris128:
|
||||||
; SSE42: ## %bb.0:
|
; X86-SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
|
; X86-SSE-NEXT: pushl %ebx ## encoding: [0x53]
|
||||||
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; X86-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; X86-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
; X86-SSE-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
||||||
; SSE42-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
||||||
; SSE42-NEXT: sets %bl ## encoding: [0x0f,0x98,0xc3]
|
; X86-SSE-NEXT: sets %bl ## encoding: [0x0f,0x98,0xc3]
|
||||||
; SSE42-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
; X86-SSE-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
||||||
; SSE42-NEXT: popl %ebx ## encoding: [0x5b]
|
; X86-SSE-NEXT: popl %ebx ## encoding: [0x5b]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse42_pcmpestris128:
|
; X86-AVX-LABEL: test_x86_sse42_pcmpestris128:
|
||||||
; VCHECK: ## %bb.0:
|
; X86-AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
|
; X86-AVX-NEXT: pushl %ebx ## encoding: [0x53]
|
||||||
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; X86-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; X86-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
; X86-AVX-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
||||||
; VCHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
||||||
; VCHECK-NEXT: sets %bl ## encoding: [0x0f,0x98,0xc3]
|
; X86-AVX-NEXT: sets %bl ## encoding: [0x0f,0x98,0xc3]
|
||||||
; VCHECK-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
; X86-AVX-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
||||||
; VCHECK-NEXT: popl %ebx ## encoding: [0x5b]
|
; X86-AVX-NEXT: popl %ebx ## encoding: [0x5b]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; X86-AVX-NEXT: retl ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-SSE-LABEL: test_x86_sse42_pcmpestris128:
|
||||||
|
; X64-SSE: ## %bb.0:
|
||||||
|
; X64-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-SSE-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
|
||||||
|
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
||||||
|
; X64-SSE-NEXT: sets %sil ## encoding: [0x40,0x0f,0x98,0xc6]
|
||||||
|
; X64-SSE-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
|
||||||
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-AVX-LABEL: test_x86_sse42_pcmpestris128:
|
||||||
|
; X64-AVX: ## %bb.0:
|
||||||
|
; X64-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-AVX-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
|
||||||
|
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
||||||
|
; X64-AVX-NEXT: sets %sil ## encoding: [0x40,0x0f,0x98,0xc6]
|
||||||
|
; X64-AVX-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
|
||||||
|
; X64-AVX-NEXT: retq ## encoding: [0xc3]
|
||||||
%res = call i32 @llvm.x86.sse42.pcmpestris128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
|
%res = call i32 @llvm.x86.sse42.pcmpestris128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
|
||||||
ret i32 %res
|
ret i32 %res
|
||||||
}
|
}
|
||||||
|
@ -186,29 +276,49 @@ declare i32 @llvm.x86.sse42.pcmpestris128(<16 x i8>, i32, <16 x i8>, i32, i8) no
|
||||||
|
|
||||||
|
|
||||||
define i32 @test_x86_sse42_pcmpestriz128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
|
define i32 @test_x86_sse42_pcmpestriz128(<16 x i8> %a0, <16 x i8> %a2) nounwind {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpestriz128:
|
; X86-SSE-LABEL: test_x86_sse42_pcmpestriz128:
|
||||||
; SSE42: ## %bb.0:
|
; X86-SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: pushl %ebx ## encoding: [0x53]
|
; X86-SSE-NEXT: pushl %ebx ## encoding: [0x53]
|
||||||
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; X86-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; X86-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
; X86-SSE-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
||||||
; SSE42-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
; X86-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
||||||
; SSE42-NEXT: sete %bl ## encoding: [0x0f,0x94,0xc3]
|
; X86-SSE-NEXT: sete %bl ## encoding: [0x0f,0x94,0xc3]
|
||||||
; SSE42-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
; X86-SSE-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
||||||
; SSE42-NEXT: popl %ebx ## encoding: [0x5b]
|
; X86-SSE-NEXT: popl %ebx ## encoding: [0x5b]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse42_pcmpestriz128:
|
; X86-AVX-LABEL: test_x86_sse42_pcmpestriz128:
|
||||||
; VCHECK: ## %bb.0:
|
; X86-AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: pushl %ebx ## encoding: [0x53]
|
; X86-AVX-NEXT: pushl %ebx ## encoding: [0x53]
|
||||||
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; X86-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; X86-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
; X86-AVX-NEXT: xorl %ebx, %ebx ## encoding: [0x31,0xdb]
|
||||||
; VCHECK-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
; X86-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
||||||
; VCHECK-NEXT: sete %bl ## encoding: [0x0f,0x94,0xc3]
|
; X86-AVX-NEXT: sete %bl ## encoding: [0x0f,0x94,0xc3]
|
||||||
; VCHECK-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
; X86-AVX-NEXT: movl %ebx, %eax ## encoding: [0x89,0xd8]
|
||||||
; VCHECK-NEXT: popl %ebx ## encoding: [0x5b]
|
; X86-AVX-NEXT: popl %ebx ## encoding: [0x5b]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; X86-AVX-NEXT: retl ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-SSE-LABEL: test_x86_sse42_pcmpestriz128:
|
||||||
|
; X64-SSE: ## %bb.0:
|
||||||
|
; X64-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-SSE-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
|
||||||
|
; X64-SSE-NEXT: pcmpestri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x61,0xc1,0x07]
|
||||||
|
; X64-SSE-NEXT: sete %sil ## encoding: [0x40,0x0f,0x94,0xc6]
|
||||||
|
; X64-SSE-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
|
||||||
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-AVX-LABEL: test_x86_sse42_pcmpestriz128:
|
||||||
|
; X64-AVX: ## %bb.0:
|
||||||
|
; X64-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-AVX-NEXT: xorl %esi, %esi ## encoding: [0x31,0xf6]
|
||||||
|
; X64-AVX-NEXT: vpcmpestri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x61,0xc1,0x07]
|
||||||
|
; X64-AVX-NEXT: sete %sil ## encoding: [0x40,0x0f,0x94,0xc6]
|
||||||
|
; X64-AVX-NEXT: movl %esi, %eax ## encoding: [0x89,0xf0]
|
||||||
|
; X64-AVX-NEXT: retq ## encoding: [0xc3]
|
||||||
%res = call i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
|
%res = call i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <i32> [#uses=1]
|
||||||
ret i32 %res
|
ret i32 %res
|
||||||
}
|
}
|
||||||
|
@ -216,19 +326,19 @@ declare i32 @llvm.x86.sse42.pcmpestriz128(<16 x i8>, i32, <16 x i8>, i32, i8) no
|
||||||
|
|
||||||
|
|
||||||
define <16 x i8> @test_x86_sse42_pcmpestrm128(<16 x i8> %a0, <16 x i8> %a2) {
|
define <16 x i8> @test_x86_sse42_pcmpestrm128(<16 x i8> %a0, <16 x i8> %a2) {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpestrm128:
|
; SSE-LABEL: test_x86_sse42_pcmpestrm128:
|
||||||
; SSE42: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: pcmpestrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0xc1,0x07]
|
; SSE-NEXT: pcmpestrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0xc1,0x07]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse42_pcmpestrm128:
|
; AVX-LABEL: test_x86_sse42_pcmpestrm128:
|
||||||
; VCHECK: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: vpcmpestrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0xc1,0x07]
|
; AVX-NEXT: vpcmpestrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0xc1,0x07]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
|
%res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a2, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
|
||||||
ret <16 x i8> %res
|
ret <16 x i8> %res
|
||||||
}
|
}
|
||||||
|
@ -236,21 +346,35 @@ declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i
|
||||||
|
|
||||||
|
|
||||||
define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2) {
|
define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2) {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpestrm128_load:
|
; X86-SSE-LABEL: test_x86_sse42_pcmpestrm128_load:
|
||||||
; SSE42: ## %bb.0:
|
; X86-SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
||||||
; SSE42-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; X86-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; X86-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; SSE42-NEXT: pcmpestrm $7, (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0x01,0x07]
|
; X86-SSE-NEXT: pcmpestrm $7, (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0x01,0x07]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse42_pcmpestrm128_load:
|
; X86-AVX-LABEL: test_x86_sse42_pcmpestrm128_load:
|
||||||
; VCHECK: ## %bb.0:
|
; X86-AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
||||||
; VCHECK-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
; X86-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
; X86-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
; VCHECK-NEXT: vpcmpestrm $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0x01,0x07]
|
; X86-AVX-NEXT: vpcmpestrm $7, (%ecx), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0x01,0x07]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; X86-AVX-NEXT: retl ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-SSE-LABEL: test_x86_sse42_pcmpestrm128_load:
|
||||||
|
; X64-SSE: ## %bb.0:
|
||||||
|
; X64-SSE-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-SSE-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-SSE-NEXT: pcmpestrm $7, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x60,0x07,0x07]
|
||||||
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
||||||
|
;
|
||||||
|
; X64-AVX-LABEL: test_x86_sse42_pcmpestrm128_load:
|
||||||
|
; X64-AVX: ## %bb.0:
|
||||||
|
; X64-AVX-NEXT: movl $7, %eax ## encoding: [0xb8,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-AVX-NEXT: movl $7, %edx ## encoding: [0xba,0x07,0x00,0x00,0x00]
|
||||||
|
; X64-AVX-NEXT: vpcmpestrm $7, (%rdi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x60,0x07,0x07]
|
||||||
|
; X64-AVX-NEXT: retq ## encoding: [0xc3]
|
||||||
%1 = load <16 x i8>, <16 x i8>* %a2
|
%1 = load <16 x i8>, <16 x i8>* %a2
|
||||||
%res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %1, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
|
%res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %1, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
|
||||||
ret <16 x i8> %res
|
ret <16 x i8> %res
|
||||||
|
@ -258,17 +382,17 @@ define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2
|
||||||
|
|
||||||
|
|
||||||
define i32 @test_x86_sse42_pcmpistri128(<16 x i8> %a0, <16 x i8> %a1) {
|
define i32 @test_x86_sse42_pcmpistri128(<16 x i8> %a0, <16 x i8> %a1) {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpistri128:
|
; SSE-LABEL: test_x86_sse42_pcmpistri128:
|
||||||
; SSE42: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
|
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
|
||||||
; SSE42-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
; SSE-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse42_pcmpistri128:
|
; AVX-LABEL: test_x86_sse42_pcmpistri128:
|
||||||
; VCHECK: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
|
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
|
||||||
; VCHECK-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
; AVX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
|
%res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
|
||||||
ret i32 %res
|
ret i32 %res
|
||||||
}
|
}
|
||||||
|
@ -276,32 +400,37 @@ declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind read
|
||||||
|
|
||||||
|
|
||||||
define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
|
define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpistri128_load:
|
; X86-SSE-LABEL: test_x86_sse42_pcmpistri128_load:
|
||||||
; SSE42: ## %bb.0:
|
; X86-SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
||||||
; SSE42-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
||||||
; SSE42-NEXT: movdqa (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x01]
|
; X86-SSE-NEXT: movdqa (%ecx), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x01]
|
||||||
; SSE42-NEXT: pcmpistri $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0x00,0x07]
|
; X86-SSE-NEXT: pcmpistri $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0x00,0x07]
|
||||||
; SSE42-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
; X86-SSE-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; X86-SSE-NEXT: retl ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; AVX2-LABEL: test_x86_sse42_pcmpistri128_load:
|
; X86-AVX-LABEL: test_x86_sse42_pcmpistri128_load:
|
||||||
; AVX2: ## %bb.0:
|
; X86-AVX: ## %bb.0:
|
||||||
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
||||||
; AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
||||||
; AVX2-NEXT: vmovdqa (%ecx), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x01]
|
; X86-AVX-NEXT: vmovdqa (%ecx), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x01]
|
||||||
; AVX2-NEXT: vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
|
; X86-AVX-NEXT: vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
|
||||||
; AVX2-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
; X86-AVX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
||||||
; AVX2-NEXT: retl ## encoding: [0xc3]
|
; X86-AVX-NEXT: retl ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; SKX-LABEL: test_x86_sse42_pcmpistri128_load:
|
; X64-SSE-LABEL: test_x86_sse42_pcmpistri128_load:
|
||||||
; SKX: ## %bb.0:
|
; X64-SSE: ## %bb.0:
|
||||||
; SKX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
|
; X64-SSE-NEXT: movdqa (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x6f,0x07]
|
||||||
; SKX-NEXT: movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
|
; X64-SSE-NEXT: pcmpistri $7, (%rsi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0x06,0x07]
|
||||||
; SKX-NEXT: vmovdqa (%ecx), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x01]
|
; X64-SSE-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
||||||
; SKX-NEXT: vpcmpistri $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x00,0x07]
|
; X64-SSE-NEXT: retq ## encoding: [0xc3]
|
||||||
; SKX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
;
|
||||||
; SKX-NEXT: retl ## encoding: [0xc3]
|
; X64-AVX-LABEL: test_x86_sse42_pcmpistri128_load:
|
||||||
|
; X64-AVX: ## %bb.0:
|
||||||
|
; X64-AVX-NEXT: vmovdqa (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x6f,0x07]
|
||||||
|
; X64-AVX-NEXT: vpcmpistri $7, (%rsi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0x06,0x07]
|
||||||
|
; X64-AVX-NEXT: movl %ecx, %eax ## encoding: [0x89,0xc8]
|
||||||
|
; X64-AVX-NEXT: retq ## encoding: [0xc3]
|
||||||
%1 = load <16 x i8>, <16 x i8>* %a0
|
%1 = load <16 x i8>, <16 x i8>* %a0
|
||||||
%2 = load <16 x i8>, <16 x i8>* %a1
|
%2 = load <16 x i8>, <16 x i8>* %a1
|
||||||
%res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1]
|
%res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1]
|
||||||
|
@ -310,19 +439,19 @@ define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
|
||||||
|
|
||||||
|
|
||||||
define i32 @test_x86_sse42_pcmpistria128(<16 x i8> %a0, <16 x i8> %a1) {
|
define i32 @test_x86_sse42_pcmpistria128(<16 x i8> %a0, <16 x i8> %a1) {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpistria128:
|
; SSE-LABEL: test_x86_sse42_pcmpistria128:
|
||||||
; SSE42: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
|
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
|
||||||
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
|
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
|
||||||
; SSE42-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
|
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse42_pcmpistria128:
|
; AVX-LABEL: test_x86_sse42_pcmpistria128:
|
||||||
; VCHECK: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
|
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
|
||||||
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
|
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
|
||||||
; VCHECK-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
|
; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call i32 @llvm.x86.sse42.pcmpistria128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
|
%res = call i32 @llvm.x86.sse42.pcmpistria128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
|
||||||
ret i32 %res
|
ret i32 %res
|
||||||
}
|
}
|
||||||
|
@ -330,19 +459,19 @@ declare i32 @llvm.x86.sse42.pcmpistria128(<16 x i8>, <16 x i8>, i8) nounwind rea
|
||||||
|
|
||||||
|
|
||||||
define i32 @test_x86_sse42_pcmpistric128(<16 x i8> %a0, <16 x i8> %a1) {
|
define i32 @test_x86_sse42_pcmpistric128(<16 x i8> %a0, <16 x i8> %a1) {
|
||||||
; SSE42-LABEL: test_x86_sse42_pcmpistric128:
|
; SSE-LABEL: test_x86_sse42_pcmpistric128:
|
||||||
; SSE42: ## %bb.0:
|
; SSE: ## %bb.0:
|
||||||
; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
|
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
|
||||||
; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
|
; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
|
||||||
; SSE42-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
|
; SSE-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
|
||||||
; SSE42-NEXT: retl ## encoding: [0xc3]
|
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
;
|
;
|
||||||
; VCHECK-LABEL: test_x86_sse42_pcmpistric128:
|
; AVX-LABEL: test_x86_sse42_pcmpistric128:
|
||||||
; VCHECK: ## %bb.0:
|
; AVX: ## %bb.0:
|
||||||
; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
|
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
|
||||||
; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
|
; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
|
||||||
; VCHECK-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
|
; AVX-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
|
||||||
; VCHECK-NEXT: retl ## encoding: [0xc3]
|
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
|
||||||
%res = call i32 @llvm.x86.sse42.pcmpistric128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
|
%res = call i32 @llvm.x86.sse42.pcmpistric128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
|
||||||
ret i32 %res
|
ret i32 %res
|
||||||
}
|
}
|
||||||
@@ -350,19 +479,19 @@ declare i32 @llvm.x86.sse42.pcmpistric128(<16 x i8>, <16 x i8>, i8) nounwind rea
 
 
 define i32 @test_x86_sse42_pcmpistrio128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistrio128:
-; SSE42: ## %bb.0:
-; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT: seto %al ## encoding: [0x0f,0x90,0xc0]
-; SSE42-NEXT: retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistrio128:
+; SSE: ## %bb.0:
+; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT: seto %al ## encoding: [0x0f,0x90,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse42_pcmpistrio128:
-; VCHECK: ## %bb.0:
-; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT: seto %al ## encoding: [0x0f,0x90,0xc0]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse42_pcmpistrio128:
+; AVX: ## %bb.0:
+; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT: seto %al ## encoding: [0x0f,0x90,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
 ret i32 %res
 }
@@ -370,19 +499,19 @@ declare i32 @llvm.x86.sse42.pcmpistrio128(<16 x i8>, <16 x i8>, i8) nounwind rea
 
 
 define i32 @test_x86_sse42_pcmpistris128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistris128:
-; SSE42: ## %bb.0:
-; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT: sets %al ## encoding: [0x0f,0x98,0xc0]
-; SSE42-NEXT: retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistris128:
+; SSE: ## %bb.0:
+; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT: sets %al ## encoding: [0x0f,0x98,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse42_pcmpistris128:
-; VCHECK: ## %bb.0:
-; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT: sets %al ## encoding: [0x0f,0x98,0xc0]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse42_pcmpistris128:
+; AVX: ## %bb.0:
+; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT: sets %al ## encoding: [0x0f,0x98,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call i32 @llvm.x86.sse42.pcmpistris128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
 ret i32 %res
 }
@@ -390,19 +519,19 @@ declare i32 @llvm.x86.sse42.pcmpistris128(<16 x i8>, <16 x i8>, i8) nounwind rea
 
 
 define i32 @test_x86_sse42_pcmpistriz128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistriz128:
-; SSE42: ## %bb.0:
-; SSE42-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; SSE42-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
-; SSE42-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; SSE42-NEXT: retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistriz128:
+; SSE: ## %bb.0:
+; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; SSE-NEXT: pcmpistri $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x63,0xc1,0x07]
+; SSE-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse42_pcmpistriz128:
-; VCHECK: ## %bb.0:
-; VCHECK-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
-; VCHECK-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
-; VCHECK-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse42_pcmpistriz128:
+; AVX: ## %bb.0:
+; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX-NEXT: vpcmpistri $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x63,0xc1,0x07]
+; AVX-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <i32> [#uses=1]
 ret i32 %res
 }
@@ -410,15 +539,15 @@ declare i32 @llvm.x86.sse42.pcmpistriz128(<16 x i8>, <16 x i8>, i8) nounwind rea
 
 
 define <16 x i8> @test_x86_sse42_pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistrm128:
-; SSE42: ## %bb.0:
-; SSE42-NEXT: pcmpistrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0xc1,0x07]
-; SSE42-NEXT: retl ## encoding: [0xc3]
+; SSE-LABEL: test_x86_sse42_pcmpistrm128:
+; SSE: ## %bb.0:
+; SSE-NEXT: pcmpistrm $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0xc1,0x07]
+; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse42_pcmpistrm128:
-; VCHECK: ## %bb.0:
-; VCHECK-NEXT: vpcmpistrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0xc1,0x07]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; AVX-LABEL: test_x86_sse42_pcmpistrm128:
+; AVX: ## %bb.0:
+; AVX-NEXT: vpcmpistrm $7, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0xc1,0x07]
+; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
 %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<16 x i8>> [#uses=1]
 ret <16 x i8> %res
 }
@@ -426,50 +555,78 @@ declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwin
 
 
 define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1) {
-; SSE42-LABEL: test_x86_sse42_pcmpistrm128_load:
-; SSE42: ## %bb.0:
-; SSE42-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; SSE42-NEXT: pcmpistrm $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0x00,0x07]
-; SSE42-NEXT: retl ## encoding: [0xc3]
+; X86-SSE-LABEL: test_x86_sse42_pcmpistrm128_load:
+; X86-SSE: ## %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: pcmpistrm $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0x00,0x07]
+; X86-SSE-NEXT: retl ## encoding: [0xc3]
 ;
-; VCHECK-LABEL: test_x86_sse42_pcmpistrm128_load:
-; VCHECK: ## %bb.0:
-; VCHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; VCHECK-NEXT: vpcmpistrm $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0x00,0x07]
-; VCHECK-NEXT: retl ## encoding: [0xc3]
+; X86-AVX-LABEL: test_x86_sse42_pcmpistrm128_load:
+; X86-AVX: ## %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vpcmpistrm $7, (%eax), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0x00,0x07]
+; X86-AVX-NEXT: retl ## encoding: [0xc3]
+;
+; X64-SSE-LABEL: test_x86_sse42_pcmpistrm128_load:
+; X64-SSE: ## %bb.0:
+; X64-SSE-NEXT: pcmpistrm $7, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x62,0x07,0x07]
+; X64-SSE-NEXT: retq ## encoding: [0xc3]
+;
+; X64-AVX-LABEL: test_x86_sse42_pcmpistrm128_load:
+; X64-AVX: ## %bb.0:
+; X64-AVX-NEXT: vpcmpistrm $7, (%rdi), %xmm0 ## encoding: [0xc4,0xe3,0x79,0x62,0x07,0x07]
+; X64-AVX-NEXT: retq ## encoding: [0xc3]
 %1 = load <16 x i8>, <16 x i8>* %a1, align 1
 %res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %1, i8 7) ; <<16 x i8>> [#uses=1]
 ret <16 x i8> %res
 }
 
 define i32 @crc32_32_8(i32 %a, i8 %b) nounwind {
-; CHECK-LABEL: crc32_32_8:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: crc32b {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf0,0x44,0x24,0x08]
-; CHECK-NEXT: retl ## encoding: [0xc3]
+; X86-LABEL: crc32_32_8:
+; X86: ## %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: crc32b {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf0,0x44,0x24,0x08]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: crc32_32_8:
+; X64: ## %bb.0:
+; X64-NEXT: crc32b %sil, %edi ## encoding: [0xf2,0x40,0x0f,0x38,0xf0,0xfe]
+; X64-NEXT: movl %edi, %eax ## encoding: [0x89,0xf8]
+; X64-NEXT: retq ## encoding: [0xc3]
 %tmp = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a, i8 %b)
 ret i32 %tmp
 }
 declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
 
 define i32 @crc32_32_16(i32 %a, i16 %b) nounwind {
-; CHECK-LABEL: crc32_32_16:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: crc32w {{[0-9]+}}(%esp), %eax ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
-; CHECK-NEXT: retl ## encoding: [0xc3]
+; X86-LABEL: crc32_32_16:
+; X86: ## %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: crc32w {{[0-9]+}}(%esp), %eax ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: crc32_32_16:
+; X64: ## %bb.0:
+; X64-NEXT: crc32w %si, %edi ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0xfe]
+; X64-NEXT: movl %edi, %eax ## encoding: [0x89,0xf8]
+; X64-NEXT: retq ## encoding: [0xc3]
 %tmp = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a, i16 %b)
 ret i32 %tmp
 }
 declare i32 @llvm.x86.sse42.crc32.32.16(i32, i16) nounwind
 
 define i32 @crc32_32_32(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: crc32_32_32:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; CHECK-NEXT: crc32l {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
-; CHECK-NEXT: retl ## encoding: [0xc3]
+; X86-LABEL: crc32_32_32:
+; X86: ## %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: crc32l {{[0-9]+}}(%esp), %eax ## encoding: [0xf2,0x0f,0x38,0xf1,0x44,0x24,0x08]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: crc32_32_32:
+; X64: ## %bb.0:
+; X64-NEXT: crc32l %esi, %edi ## encoding: [0xf2,0x0f,0x38,0xf1,0xfe]
+; X64-NEXT: movl %edi, %eax ## encoding: [0x89,0xf8]
+; X64-NEXT: retq ## encoding: [0xc3]
 %tmp = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a, i32 %b)
 ret i32 %tmp
 }
 
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=SSE42
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=VCHECK --check-prefix=SKX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse4.2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
 
 declare i64 @llvm.x86.sse42.crc32.64.8(i64, i8) nounwind
 declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind
 
@@ -1,36 +1,26 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefixes=CHECK,X64
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse4a-builtins.c
 
 define <2 x i64> @test_mm_extracti_si64(<2 x i64> %x) {
-; X32-LABEL: test_mm_extracti_si64:
-; X32: # %bb.0:
-; X32-NEXT: extrq $2, $3, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_mm_extracti_si64:
-; X64: # %bb.0:
-; X64-NEXT: extrq $2, $3, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_mm_extracti_si64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: extrq $2, $3, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
 ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind readnone
 
 define <2 x i64> @test_mm_extract_si64(<2 x i64> %x, <2 x i64> %y) {
-; X32-LABEL: test_mm_extract_si64:
-; X32: # %bb.0:
-; X32-NEXT: extrq %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_mm_extract_si64:
-; X64: # %bb.0:
-; X64-NEXT: extrq %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_mm_extract_si64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: extrq %xmm1, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %bc = bitcast <2 x i64> %y to <16 x i8>
 %res = call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %bc)
 ret <2 x i64> %res
@@ -38,41 +28,31 @@ define <2 x i64> @test_mm_extract_si64(<2 x i64> %x, <2 x i64> %y) {
 declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_inserti_si64(<2 x i64> %x, <2 x i64> %y) {
-; X32-LABEL: test_mm_inserti_si64:
-; X32: # %bb.0:
-; X32-NEXT: insertq $6, $5, %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_mm_inserti_si64:
-; X64: # %bb.0:
-; X64-NEXT: insertq $6, $5, %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_mm_inserti_si64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: insertq $6, $5, %xmm1, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
 ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind readnone
 
 define <2 x i64> @test_mm_insert_si64(<2 x i64> %x, <2 x i64> %y) {
-; X32-LABEL: test_mm_insert_si64:
-; X32: # %bb.0:
-; X32-NEXT: insertq %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_mm_insert_si64:
-; X64: # %bb.0:
-; X64-NEXT: insertq %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_mm_insert_si64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: insertq %xmm1, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
 %res = call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y)
 ret <2 x i64> %res
 }
 declare <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64>, <2 x i64>) nounwind readnone
 
 define void @test_stream_sd(double* %p, <2 x double> %a) {
-; X32-LABEL: test_stream_sd:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movntsd %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test_stream_sd:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movntsd %xmm0, (%eax)
+; X86-NEXT: retl
 ;
 ; X64-LABEL: test_stream_sd:
 ; X64: # %bb.0:
@@ -84,11 +64,11 @@ define void @test_stream_sd(double* %p, <2 x double> %a) {
 }
 
 define void @test_mm_stream_ss(float* %p, <4 x float> %a) {
-; X32-LABEL: test_mm_stream_ss:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movntss %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test_mm_stream_ss:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movntss %xmm0, (%eax)
+; X86-NEXT: retl
 ;
 ; X64-LABEL: test_mm_stream_ss:
 ; X64: # %bb.0:
 
@@ -1,20 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=X64
 
 define void @test_movntss(i8* %p, <4 x float> %a) nounwind optsize ssp {
-; X32-LABEL: test_movntss:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movntss %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test_movntss:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: movntss %xmm0, (%eax) # encoding: [0xf3,0x0f,0x2b,0x00]
+; X86-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_movntss:
 ; X64: # %bb.0:
-; X64-NEXT: movntss %xmm0, (%rdi)
-; X64-NEXT: retq
+; X64-NEXT: movntss %xmm0, (%rdi) # encoding: [0xf3,0x0f,0x2b,0x07]
+; X64-NEXT: retq # encoding: [0xc3]
 tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a) nounwind
 ret void
 }
@@ -22,16 +22,16 @@ define void @test_movntss(i8* %p, <4 x float> %a) nounwind optsize ssp {
 declare void @llvm.x86.sse4a.movnt.ss(i8*, <4 x float>)
 
 define void @test_movntsd(i8* %p, <2 x double> %a) nounwind optsize ssp {
-; X32-LABEL: test_movntsd:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movntsd %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test_movntsd:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-NEXT: movntsd %xmm0, (%eax) # encoding: [0xf2,0x0f,0x2b,0x00]
+; X86-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_movntsd:
 ; X64: # %bb.0:
-; X64-NEXT: movntsd %xmm0, (%rdi)
-; X64-NEXT: retq
+; X64-NEXT: movntsd %xmm0, (%rdi) # encoding: [0xf2,0x0f,0x2b,0x07]
+; X64-NEXT: retq # encoding: [0xc3]
 tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a) nounwind
 ret void
 }
 
@@ -1,49 +1,44 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32 --check-prefix=X32-SSE
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
 
 define <2 x i64> @test_extrqi(<2 x i64> %x) nounwind uwtable ssp {
-; X32-LABEL: test_extrqi:
-; X32: # %bb.0:
-; X32-NEXT: extrq $2, $3, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_extrqi:
-; X64: # %bb.0:
-; X64-NEXT: extrq $2, $3, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_extrqi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
 ret <2 x i64> %1
 }
 
 define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_extrqi_domain:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movdqa (%eax), %xmm0
-; X32-SSE-NEXT: extrq $2, $3, %xmm0
-; X32-SSE-NEXT: retl
+; X86-SSE-LABEL: test_extrqi_domain:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movdqa (%eax), %xmm0 # encoding: [0x66,0x0f,0x6f,0x00]
+; X86-SSE-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X86-SSE-NEXT: retl # encoding: [0xc3]
 ;
-; X32-AVX-LABEL: test_extrqi_domain:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovdqa (%eax), %xmm0
-; X32-AVX-NEXT: extrq $2, $3, %xmm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: test_extrqi_domain:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovdqa (%eax), %xmm0 # encoding: [0xc5,0xf9,0x6f,0x00]
+; X86-AVX-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_extrqi_domain:
 ; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa (%rdi), %xmm0
-; X64-SSE-NEXT: extrq $2, $3, %xmm0
-; X64-SSE-NEXT: retq
+; X64-SSE-NEXT: movdqa (%rdi), %xmm0 # encoding: [0x66,0x0f,0x6f,0x07]
+; X64-SSE-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X64-SSE-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_extrqi_domain:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa (%rdi), %xmm0
-; X64-AVX-NEXT: extrq $2, $3, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm0 # encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X64-AVX-NEXT: retq # encoding: [0xc3]
 %1 = load <2 x i64>, <2 x i64> *%p
 %2 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %1, i8 3, i8 2)
 ret <2 x i64> %2
@@ -52,50 +47,45 @@ define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
 declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind
 
 define <2 x i64> @test_extrq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_extrq:
-; X32: # %bb.0:
-; X32-NEXT: extrq %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_extrq:
-; X64: # %bb.0:
-; X64-NEXT: extrq %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_extrq:
+; CHECK: # %bb.0:
+; CHECK-NEXT: extrq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x79,0xc1]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %1 = bitcast <2 x i64> %y to <16 x i8>
 %2 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %1) nounwind
 ret <2 x i64> %2
 }
 
 define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_extrq_domain:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movdqa (%eax), %xmm1
-; X32-SSE-NEXT: extrq %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: retl
+; X86-SSE-LABEL: test_extrq_domain:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT: extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X86-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT: retl # encoding: [0xc3]
 ;
-; X32-AVX-LABEL: test_extrq_domain:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT: extrq %xmm0, %xmm1
-; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: test_extrq_domain:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT: extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X86-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_extrq_domain:
 ; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa (%rdi), %xmm1
-; X64-SSE-NEXT: extrq %xmm0, %xmm1
-; X64-SSE-NEXT: movdqa %xmm1, %xmm0
-; X64-SSE-NEXT: retq
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT: extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_extrq_domain:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT: extrq %xmm0, %xmm1
-; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT: extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT: retq # encoding: [0xc3]
 %1 = load <2 x i64>, <2 x i64> *%p
 %2 = bitcast <2 x i64> %y to <16 x i8>
 %3 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %1, <16 x i8> %2) nounwind
@@ -105,49 +95,44 @@ define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtabl
 declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind
 
 define <2 x i64> @test_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_insertqi:
-; X32: # %bb.0:
-; X32-NEXT: insertq $6, $5, %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_insertqi:
-; X64: # %bb.0:
-; X64-NEXT: insertq $6, $5, %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_insertqi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: insertq $6, $5, %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x78,0xc1,0x05,0x06]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
 ret <2 x i64> %1
 }
 
 define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_insertqi_domain:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movdqa (%eax), %xmm1
-; X32-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: retl
+; X86-SSE-LABEL: test_insertqi_domain:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X86-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT: retl # encoding: [0xc3]
 ;
-; X32-AVX-LABEL: test_insertqi_domain:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1
-; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: test_insertqi_domain:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X86-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_insertqi_domain:
 ; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa (%rdi), %xmm1
-; X64-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1
-; X64-SSE-NEXT: movdqa %xmm1, %xmm0
-; X64-SSE-NEXT: retq
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_insertqi_domain:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1
-; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT: retq # encoding: [0xc3]
 %1 = load <2 x i64>, <2 x i64> *%p
 %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %y, i8 5, i8 6)
 ret <2 x i64> %2
@@ -156,49 +141,44 @@ define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwt
 declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind
 
 define <2 x i64> @test_insertq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_insertq:
-; X32: # %bb.0:
-; X32-NEXT: insertq %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_insertq:
-; X64: # %bb.0:
-; X64-NEXT: insertq %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_insertq:
+; CHECK: # %bb.0:
+; CHECK-NEXT: insertq %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x79,0xc1]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
 %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) nounwind
 ret <2 x i64> %1
 }
 
 define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_insertq_domain:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movdqa (%eax), %xmm1
-; X32-SSE-NEXT: insertq %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: retl
+; X86-SSE-LABEL: test_insertq_domain:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT: insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X86-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT: retl # encoding: [0xc3]
 ;
-; X32-AVX-LABEL: test_insertq_domain:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT: insertq %xmm0, %xmm1
-; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: test_insertq_domain:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT: insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X86-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: test_insertq_domain:
 ; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa (%rdi), %xmm1
-; X64-SSE-NEXT: insertq %xmm0, %xmm1
-; X64-SSE-NEXT: movdqa %xmm1, %xmm0
-; X64-SSE-NEXT: retq
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT: insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_insertq_domain:
 ; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT: insertq %xmm0, %xmm1
-; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT: insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT: retq # encoding: [0xc3]
 %1 = load <2 x i64>, <2 x i64> *%p
 %2 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %1, <2 x i64> %y) nounwind
 ret <2 x i64> %2