; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64
; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86

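; This file exercises AVX-512F intrinsic selection on a 64-bit and a 32-bit
; target. The X64/X86 prefixes capture target-specific differences (mask
; arguments arrive in a GPR such as %edi on x86-64 but are loaded from the
; stack on i686); output common to both targets is checked under the CHECK
; prefix. The assertions are autogenerated, so they should be regenerated
; with update_llc_test_checks.py rather than edited by hand.
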
define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) {
; X64-LABEL: test_mask_compress_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcompresspd %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_compress_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcompresspd %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovdqa64 %zmm1, %zmm0
; X86-NEXT: retl
  %1 = bitcast i8 %mask to <8 x i1>
  %2 = call <8 x double> @llvm.x86.avx512.mask.compress.v8f64(<8 x double> %data, <8 x double> %passthru, <8 x i1> %1)
  ret <8 x double> %2
}

define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) {
; X64-LABEL: test_maskz_compress_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcompresspd %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_compress_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcompresspd %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = bitcast i8 %mask to <8 x i1>
  %2 = call <8 x double> @llvm.x86.avx512.mask.compress.v8f64(<8 x double> %data, <8 x double> zeroinitializer, <8 x i1> %1)
  ret <8 x double> %2
}

define <8 x double> @test_compress_pd_512(<8 x double> %data) {
; CHECK-LABEL: test_compress_pd_512:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x double> @llvm.x86.avx512.mask.compress.v8f64(<8 x double> %data, <8 x double> undef, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <8 x double> %1
}

define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) {
; X64-LABEL: test_mask_compress_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcompressps %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_compress_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcompressps %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovdqa64 %zmm1, %zmm0
; X86-NEXT: retl
  %1 = bitcast i16 %mask to <16 x i1>
  %2 = call <16 x float> @llvm.x86.avx512.mask.compress.v16f32(<16 x float> %data, <16 x float> %passthru, <16 x i1> %1)
  ret <16 x float> %2
}

define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) {
; X64-LABEL: test_maskz_compress_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcompressps %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_compress_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcompressps %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = bitcast i16 %mask to <16 x i1>
  %2 = call <16 x float> @llvm.x86.avx512.mask.compress.v16f32(<16 x float> %data, <16 x float> zeroinitializer, <16 x i1> %1)
  ret <16 x float> %2
}

define <16 x float> @test_compress_ps_512(<16 x float> %data) {
; CHECK-LABEL: test_compress_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <16 x float> @llvm.x86.avx512.mask.compress.v16f32(<16 x float> %data, <16 x float> undef, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <16 x float> %1
}

define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) {
; X64-LABEL: test_mask_compress_q_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpcompressq %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_compress_q_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpcompressq %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovdqa64 %zmm1, %zmm0
; X86-NEXT: retl
  %1 = bitcast i8 %mask to <8 x i1>
  %2 = call <8 x i64> @llvm.x86.avx512.mask.compress.v8i64(<8 x i64> %data, <8 x i64> %passthru, <8 x i1> %1)
  ret <8 x i64> %2
}

define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) {
; X64-LABEL: test_maskz_compress_q_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpcompressq %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_compress_q_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpcompressq %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = bitcast i8 %mask to <8 x i1>
  %2 = call <8 x i64> @llvm.x86.avx512.mask.compress.v8i64(<8 x i64> %data, <8 x i64> zeroinitializer, <8 x i1> %1)
  ret <8 x i64> %2
}

define <8 x i64> @test_compress_q_512(<8 x i64> %data) {
; CHECK-LABEL: test_compress_q_512:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x i64> @llvm.x86.avx512.mask.compress.v8i64(<8 x i64> %data, <8 x i64> undef, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <8 x i64> %1
}

define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) {
; X64-LABEL: test_mask_compress_d_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpcompressd %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_compress_d_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpcompressd %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovdqa64 %zmm1, %zmm0
; X86-NEXT: retl
  %1 = bitcast i16 %mask to <16 x i1>
  %2 = call <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32> %data, <16 x i32> %passthru, <16 x i1> %1)
  ret <16 x i32> %2
}

define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) {
; X64-LABEL: test_maskz_compress_d_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_compress_d_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpcompressd %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = bitcast i16 %mask to <16 x i1>
  %2 = call <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32> %data, <16 x i32> zeroinitializer, <16 x i1> %1)
  ret <16 x i32> %2
}

define <16 x i32> @test_compress_d_512(<16 x i32> %data) {
; CHECK-LABEL: test_compress_d_512:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32> %data, <16 x i32> undef, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <16 x i32> %1
}

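; The expand tests mirror the compress tests above: the masked forms select
; vexpandpd/vexpandps/vpexpandq/vpexpandd, and an all-true constant mask
; makes the operation a no-op that folds away to a plain return.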
define <8 x double> @test_expand_pd_512(<8 x double> %data) {
; CHECK-LABEL: test_expand_pd_512:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x double> @llvm.x86.avx512.mask.expand.v8f64(<8 x double> %data, <8 x double> undef, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <8 x double> %1
}

define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) {
; X64-LABEL: test_mask_expand_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vexpandpd %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_expand_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vexpandpd %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovdqa64 %zmm1, %zmm0
; X86-NEXT: retl
  %1 = bitcast i8 %mask to <8 x i1>
  %2 = call <8 x double> @llvm.x86.avx512.mask.expand.v8f64(<8 x double> %data, <8 x double> %passthru, <8 x i1> %1)
  ret <8 x double> %2
}

define <8 x double> @test_maskz_expand_pd_512(<8 x double> %data, i8 %mask) {
; X64-LABEL: test_maskz_expand_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_expand_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = bitcast i8 %mask to <8 x i1>
  %2 = call <8 x double> @llvm.x86.avx512.mask.expand.v8f64(<8 x double> %data, <8 x double> zeroinitializer, <8 x i1> %1)
  ret <8 x double> %2
}

define <16 x float> @test_expand_ps_512(<16 x float> %data) {
; CHECK-LABEL: test_expand_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <16 x float> @llvm.x86.avx512.mask.expand.v16f32(<16 x float> %data, <16 x float> undef, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <16 x float> %1
}

define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) {
; X64-LABEL: test_mask_expand_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vexpandps %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_expand_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vexpandps %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovdqa64 %zmm1, %zmm0
; X86-NEXT: retl
  %1 = bitcast i16 %mask to <16 x i1>
  %2 = call <16 x float> @llvm.x86.avx512.mask.expand.v16f32(<16 x float> %data, <16 x float> %passthru, <16 x i1> %1)
  ret <16 x float> %2
}

define <16 x float> @test_maskz_expand_ps_512(<16 x float> %data, i16 %mask) {
; X64-LABEL: test_maskz_expand_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_expand_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = bitcast i16 %mask to <16 x i1>
  %2 = call <16 x float> @llvm.x86.avx512.mask.expand.v16f32(<16 x float> %data, <16 x float> zeroinitializer, <16 x i1> %1)
  ret <16 x float> %2
}

define <8 x i64> @test_expand_q_512(<8 x i64> %data) {
; CHECK-LABEL: test_expand_q_512:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x i64> @llvm.x86.avx512.mask.expand.v8i64(<8 x i64> %data, <8 x i64> undef, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <8 x i64> %1
}

define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) {
; X64-LABEL: test_mask_expand_q_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpexpandq %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_expand_q_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpexpandq %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovdqa64 %zmm1, %zmm0
; X86-NEXT: retl
  %1 = bitcast i8 %mask to <8 x i1>
  %2 = call <8 x i64> @llvm.x86.avx512.mask.expand.v8i64(<8 x i64> %data, <8 x i64> %passthru, <8 x i1> %1)
  ret <8 x i64> %2
}

define <8 x i64> @test_maskz_expand_q_512(<8 x i64> %data, i8 %mask) {
; X64-LABEL: test_maskz_expand_q_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_expand_q_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = bitcast i8 %mask to <8 x i1>
  %2 = call <8 x i64> @llvm.x86.avx512.mask.expand.v8i64(<8 x i64> %data, <8 x i64> zeroinitializer, <8 x i1> %1)
  ret <8 x i64> %2
}

define <16 x i32> @test_expand_d_512(<16 x i32> %data) {
; CHECK-LABEL: test_expand_d_512:
; CHECK: # %bb.0:
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32> %data, <16 x i32> undef, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
  ret <16 x i32> %1
}

define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) {
; X64-LABEL: test_mask_expand_d_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpexpandd %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovdqa64 %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_expand_d_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpexpandd %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovdqa64 %zmm1, %zmm0
; X86-NEXT: retl
  %1 = bitcast i16 %mask to <16 x i1>
  %2 = call <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32> %data, <16 x i32> %passthru, <16 x i1> %1)
  ret <16 x i32> %2
}

define <16 x i32> @test_maskz_expand_d_512(<16 x i32> %data, i16 %mask) {
; X64-LABEL: test_maskz_expand_d_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_expand_d_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = bitcast i16 %mask to <16 x i1>
  %2 = call <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32> %data, <16 x i32> zeroinitializer, <16 x i1> %1)
  ret <16 x i32> %2
}

define <16 x float> @test_rcp_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_rcp_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vrcp14ps %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float>, <16 x float>, i16) nounwind readnone

define <8 x double> @test_rcp_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_rcp_pd_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vrcp14pd %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1) ; <<8 x double>> [#uses=1]
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double>, <8 x double>, i8) nounwind readnone

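; In the rndscale scalar tests below, the immediate operand 11 follows the
; ROUND*/VRNDSCALE* immediate encoding: bits 1:0 = 3 select round-toward-zero
; and bit 3 suppresses the precision exception. With an all-ones mask the
; same operation can be (and is) selected as the plain vroundsd/vroundss
; form. The trailing i32 4 operand is the usual "current rounding mode, no
; SAE" value.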
declare <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32, i32)

define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: test_rndscale_sd:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundsd $11, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 11, i32 4)
  ret <2 x double> %res
}

define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
; X64-LABEL: test_rndscale_sd_mask:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vrndscalesd $11, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovapd %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_rndscale_sd_mask:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vrndscalesd $11, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovapd %xmm2, %xmm0
; X86-NEXT: retl
  %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 11, i32 4)
  ret <2 x double> %res
}

define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, <2 x double>* %bptr, <2 x double> %c, i8 %mask) {
; X64-LABEL: test_rndscale_sd_mask_load:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vrndscalesd $11, (%rdi), %xmm0, %xmm1 {%k1}
; X64-NEXT: vmovapd %xmm1, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_rndscale_sd_mask_load:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vrndscalesd $11, (%eax), %xmm0, %xmm1 {%k1}
; X86-NEXT: vmovapd %xmm1, %xmm0
; X86-NEXT: retl
  %b = load <2 x double>, <2 x double>* %bptr
  %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 11, i32 4)
  ret <2 x double> %res
}

define <2 x double> @test_rndscale_sd_maskz(<2 x double> %a, <2 x double> %b, i8 %mask) {
; X64-LABEL: test_rndscale_sd_maskz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vrndscalesd $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_rndscale_sd_maskz:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vrndscalesd $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: retl
  %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> zeroinitializer, i8 %mask, i32 11, i32 4)
  ret <2 x double> %res
}

declare <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32, i32)

define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test_rndscale_ss:
; CHECK: # %bb.0:
; CHECK-NEXT: vroundss $11, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 11, i32 4)
  ret <4 x float> %res
}

define <4 x float> @test_rndscale_ss_load(<4 x float> %a, <4 x float>* %bptr) {
; X64-LABEL: test_rndscale_ss_load:
; X64: # %bb.0:
; X64-NEXT: vroundss $11, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_rndscale_ss_load:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vroundss $11, (%eax), %xmm0, %xmm0
; X86-NEXT: retl
  %b = load <4 x float>, <4 x float>* %bptr
  %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 11, i32 4)
  ret <4 x float> %res
}

define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; X64-LABEL: test_rndscale_ss_mask:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vrndscaless $11, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_rndscale_ss_mask:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vrndscaless $11, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
  %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 11, i32 4)
  ret <4 x float> %res
}

define <4 x float> @test_rndscale_ss_maskz(<4 x float> %a, <4 x float> %b, i8 %mask) {
; X64-LABEL: test_rndscale_ss_maskz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vrndscaless $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_rndscale_ss_maskz:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vrndscaless $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: retl
  %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 %mask, i32 11, i32 4)
  ret <4 x float> %res
}

declare <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double>, i32, <8 x double>, i8, i32)

define <8 x double> @test7(<8 x double> %a) {
; CHECK-LABEL: test7:
; CHECK: # %bb.0:
; CHECK-NEXT: vrndscalepd $11, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> %a, i32 11, <8 x double> %a, i8 -1, i32 4)
  ret <8 x double> %res
}

declare <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float>, i32, <16 x float>, i16, i32)

define <16 x float> @test8(<16 x float> %a) {
; CHECK-LABEL: test8:
; CHECK: # %bb.0:
; CHECK-NEXT: vrndscaleps $11, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> %a, i32 11, <16 x float> %a, i16 -1, i32 4)
  ret <16 x float> %res
}

define <16 x float> @test_rsqrt_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_rsqrt_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vrsqrt14ps %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float>, <16 x float>, i16) nounwind readnone

define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_sqrt_pd_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtpd %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a0)
  ret <8 x double> %1
}

define <8 x double> @test_mask_sqrt_pd_512(<8 x double> %a0, <8 x double> %passthru, i8 %mask) {
; X64-LABEL: test_mask_sqrt_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsqrtpd %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovapd %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_sqrt_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vsqrtpd %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovapd %zmm1, %zmm0
; X86-NEXT: retl
  %1 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a0)
  %2 = bitcast i8 %mask to <8 x i1>
  %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> %passthru
  ret <8 x double> %3
}

define <8 x double> @test_maskz_sqrt_pd_512(<8 x double> %a0, i8 %mask) {
; X64-LABEL: test_maskz_sqrt_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsqrtpd %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_sqrt_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vsqrtpd %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a0)
  %2 = bitcast i8 %mask to <8 x i1>
  %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
  ret <8 x double> %3
}
declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)

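; The *_round_* variants below pass an explicit rounding operand whose
; values match Intel's _MM_FROUND_* encoding: 8 = {rn-sae}, 9 = {rd-sae},
; 10 = {ru-sae}, 11 = {rz-sae} (nearest/down/up/toward-zero, each with
; exceptions suppressed), while 4 means "use the current MXCSR rounding
; mode" and selects the non-embedded-rounding form of the instruction.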
define <8 x double> @test_sqrt_round_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_sqrt_round_pd_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtpd {rz-sae}, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double> %a0, i32 11)
  ret <8 x double> %1
}

define <8 x double> @test_mask_sqrt_round_pd_512(<8 x double> %a0, <8 x double> %passthru, i8 %mask) {
; X64-LABEL: test_mask_sqrt_round_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsqrtpd {rz-sae}, %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovapd %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_sqrt_round_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vsqrtpd {rz-sae}, %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovapd %zmm1, %zmm0
; X86-NEXT: retl
  %1 = call <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double> %a0, i32 11)
  %2 = bitcast i8 %mask to <8 x i1>
  %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> %passthru
  ret <8 x double> %3
}

define <8 x double> @test_maskz_sqrt_round_pd_512(<8 x double> %a0, i8 %mask) {
; X64-LABEL: test_maskz_sqrt_round_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsqrtpd {rz-sae}, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_sqrt_round_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vsqrtpd {rz-sae}, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = call <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double> %a0, i32 11)
  %2 = bitcast i8 %mask to <8 x i1>
  %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
  ret <8 x double> %3
}
declare <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double>, i32) nounwind readnone

define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_sqrt_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtps %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a0)
  ret <16 x float> %1
}

define <16 x float> @test_mask_sqrt_ps_512(<16 x float> %a0, <16 x float> %passthru, i16 %mask) {
; X64-LABEL: test_mask_sqrt_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsqrtps %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovaps %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_sqrt_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsqrtps %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovaps %zmm1, %zmm0
; X86-NEXT: retl
  %1 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a0)
  %2 = bitcast i16 %mask to <16 x i1>
  %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
  ret <16 x float> %3
}

define <16 x float> @test_maskz_sqrt_ps_512(<16 x float> %a0, i16 %mask) {
; X64-LABEL: test_maskz_sqrt_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsqrtps %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_sqrt_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsqrtps %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a0)
  %2 = bitcast i16 %mask to <16 x i1>
  %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
  ret <16 x float> %3
}
declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)

define <16 x float> @test_sqrt_round_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_sqrt_round_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vsqrtps {rz-sae}, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float> %a0, i32 11)
  ret <16 x float> %1
}

define <16 x float> @test_mask_sqrt_round_ps_512(<16 x float> %a0, <16 x float> %passthru, i16 %mask) {
; X64-LABEL: test_mask_sqrt_round_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsqrtps {rz-sae}, %zmm0, %zmm1 {%k1}
; X64-NEXT: vmovaps %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_sqrt_round_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsqrtps {rz-sae}, %zmm0, %zmm1 {%k1}
; X86-NEXT: vmovaps %zmm1, %zmm0
; X86-NEXT: retl
  %1 = call <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float> %a0, i32 11)
  %2 = bitcast i16 %mask to <16 x i1>
  %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
  ret <16 x float> %3
}

define <16 x float> @test_maskz_sqrt_round_ps_512(<16 x float> %a0, i16 %mask) {
; X64-LABEL: test_maskz_sqrt_round_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsqrtps {rz-sae}, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_sqrt_round_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsqrtps {rz-sae}, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %1 = call <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float> %a0, i32 11)
  %2 = bitcast i16 %mask to <16 x i1>
  %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
  ret <16 x float> %3
}
declare <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float>, i32) nounwind readnone

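; vgetexppd/vgetexpps do not round, so the last operand only controls
; exception suppression: 4 selects the plain form, while both 8 and 12
; (8 | 4) print as {sae}.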
define <8 x double> @test_getexp_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_getexp_pd_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vgetexppd %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 4)
  ret <8 x double> %res
}
define <8 x double> @test_getexp_round_pd_512(<8 x double> %a0) {
; CHECK-LABEL: test_getexp_round_pd_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vgetexppd {sae}, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 12)
  ret <8 x double> %res
}
declare <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double>, <8 x double>, i8, i32) nounwind readnone

define <16 x float> @test_getexp_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_getexp_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vgetexpps %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
  ret <16 x float> %res
}

define <16 x float> @test_getexp_round_ps_512(<16 x float> %a0) {
; CHECK-LABEL: test_getexp_round_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vgetexpps {sae}, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
  ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float>, <16 x float>, i16, i32) nounwind readnone

declare <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone

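; Each scalar sqrt test below issues four intrinsic calls (current-rounding
; masked, {rd-sae} masked, {ru-sae} zero-masked, {rz-sae} unmasked) and
; chains the results together with fadd so none of the calls is dead.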
define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_sqrt_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %xmm2, %xmm3
; X64-NEXT: vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
; X64-NEXT: vsqrtss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddps %xmm2, %xmm3, %xmm2
; X64-NEXT: vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
; X64-NEXT: vsqrtss {rz-sae}, %xmm1, %xmm0, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_sqrt_ss:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovaps %xmm2, %xmm3
; X86-NEXT: vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
; X86-NEXT: vsqrtss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddps %xmm2, %xmm3, %xmm2
; X86-NEXT: vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
; X86-NEXT: vsqrtss {rz-sae}, %xmm1, %xmm0, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm2, %xmm0
; X86-NEXT: retl
  %res0 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
  %res1 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 9)
  %res2 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 10)
  %res3 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 11)

  %res.1 = fadd <4 x float> %res0, %res1
  %res.2 = fadd <4 x float> %res2, %res3
  %res = fadd <4 x float> %res.1, %res.2
  ret <4 x float> %res
}

declare <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone

define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; X64-LABEL: test_sqrt_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %xmm2, %xmm3
; X64-NEXT: vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
; X64-NEXT: vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddpd %xmm2, %xmm3, %xmm2
; X64-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
; X64-NEXT: vsqrtsd {rz-sae}, %xmm1, %xmm0, %xmm0
; X64-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X64-NEXT: vaddpd %xmm0, %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_sqrt_sd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm2, %xmm3
; X86-NEXT: vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
; X86-NEXT: vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddpd %xmm2, %xmm3, %xmm2
; X86-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
; X86-NEXT: vsqrtsd {rz-sae}, %xmm1, %xmm0, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm2, %xmm0
; X86-NEXT: retl
  %res0 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
  %res1 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 9)
  %res2 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 10)
  %res3 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 11)

  %res.1 = fadd <2 x double> %res0, %res1
  %res.2 = fadd <2 x double> %res2, %res3
  %res = fadd <2 x double> %res.1, %res.2
  ret <2 x double> %res
}

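; For the truncating conversions (vcvtt*) the operand value 8 prints as a
; bare {sae}, since truncation has no rounding mode to encode; the
; vcvt*2si/2usi forms instead accept the full embedded-rounding values
; ({rz-sae} for 11, {rd-sae} for 9).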
define i32 @test_x86_avx512_cvttsd2usi(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttsd2usi:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvttsd2usi %xmm0, %ecx
; CHECK-NEXT: vcvttsd2usi {sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ret{{[l|q]}}
  %res0 = call i32 @llvm.x86.avx512.cvttsd2usi(<2 x double> %a0, i32 4)
  %res1 = call i32 @llvm.x86.avx512.cvttsd2usi(<2 x double> %a0, i32 8)
  %res2 = add i32 %res0, %res1
  ret i32 %res2
}
declare i32 @llvm.x86.avx512.cvttsd2usi(<2 x double>, i32) nounwind readnone

define i32 @test_x86_avx512_cvttsd2si(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttsd2si:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvttsd2si %xmm0, %ecx
; CHECK-NEXT: vcvttsd2si {sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ret{{[l|q]}}
  %res0 = call i32 @llvm.x86.avx512.cvttsd2si(<2 x double> %a0, i32 4)
  %res1 = call i32 @llvm.x86.avx512.cvttsd2si(<2 x double> %a0, i32 8)
  %res2 = add i32 %res0, %res1
  ret i32 %res2
}
declare i32 @llvm.x86.avx512.cvttsd2si(<2 x double>, i32) nounwind readnone

define i32 @test_x86_avx512_cvttss2si(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttss2si:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvttss2si {sae}, %xmm0, %ecx
; CHECK-NEXT: vcvttss2si %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ret{{[l|q]}}
  %res0 = call i32 @llvm.x86.avx512.cvttss2si(<4 x float> %a0, i32 8)
  %res1 = call i32 @llvm.x86.avx512.cvttss2si(<4 x float> %a0, i32 4)
  %res2 = add i32 %res0, %res1
  ret i32 %res2
}
declare i32 @llvm.x86.avx512.cvttss2si(<4 x float>, i32) nounwind readnone

define i32 @test_x86_avx512_cvttss2si_load(<4 x float>* %a0) {
; X64-LABEL: test_x86_avx512_cvttss2si_load:
; X64: # %bb.0:
; X64-NEXT: vcvttss2si (%rdi), %eax
; X64-NEXT: retq
;
; X86-LABEL: test_x86_avx512_cvttss2si_load:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vcvttss2si (%eax), %eax
; X86-NEXT: retl
  %a1 = load <4 x float>, <4 x float>* %a0
  %res = call i32 @llvm.x86.avx512.cvttss2si(<4 x float> %a1, i32 4)
  ret i32 %res
}

define i32 @test_x86_avx512_cvttss2usi(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvttss2usi:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvttss2usi {sae}, %xmm0, %ecx
; CHECK-NEXT: vcvttss2usi %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ret{{[l|q]}}
  %res0 = call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> %a0, i32 8)
  %res1 = call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> %a0, i32 4)
  %res2 = add i32 %res0, %res1
  ret i32 %res2
}
declare i32 @llvm.x86.avx512.cvttss2usi(<4 x float>, i32) nounwind readnone

define i32 @test_x86_avx512_cvtsd2usi32(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2usi32:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtsd2usi %xmm0, %eax
; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %ecx
; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ret{{[l|q]}}

  %res = call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %a0, i32 4)
  %res1 = call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %a0, i32 11)
  %res2 = call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %a0, i32 9)
  %res3 = add i32 %res, %res1
  %res4 = add i32 %res3, %res2
  ret i32 %res4
}
declare i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double>, i32) nounwind readnone

define i32 @test_x86_avx512_cvtsd2si32(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2si32:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtsd2si %xmm0, %eax
; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %ecx
; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ret{{[l|q]}}

  %res = call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %a0, i32 4)
  %res1 = call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %a0, i32 11)
  %res2 = call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %a0, i32 9)
  %res3 = add i32 %res, %res1
  %res4 = add i32 %res3, %res2
  ret i32 %res4
}
declare i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double>, i32) nounwind readnone

define i32 @test_x86_avx512_cvtss2usi32(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2usi32:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtss2usi %xmm0, %eax
; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %ecx
; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ret{{[l|q]}}

  %res = call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %a0, i32 4)
  %res1 = call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %a0, i32 11)
  %res2 = call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %a0, i32 9)
  %res3 = add i32 %res, %res1
  %res4 = add i32 %res3, %res2
  ret i32 %res4
}
declare i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float>, i32) nounwind readnone

define i32 @test_x86_avx512_cvtss2si32(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2si32:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtss2si %xmm0, %eax
; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %ecx
; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ret{{[l|q]}}

  %res = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %a0, i32 4)
  %res1 = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %a0, i32 11)
  %res2 = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %a0, i32 9)
  %res3 = add i32 %res, %res1
  %res4 = add i32 %res3, %res2
  ret i32 %res4
}
declare i32 @llvm.x86.avx512.vcvtss2si32(<4 x float>, i32) nounwind readnone

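; The half-precision conversions follow: vcvtph2ps with and without {sae}
; and masking, and vcvtps2ph, where one of the three results is stored
; straight to memory and so selects the memory-destination form of the
; instruction.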
define <16 x float> @test_x86_vcvtph2ps_512(<16 x i16> %a0) {
; CHECK-LABEL: test_x86_vcvtph2ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtph2ps %ymm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
  ret <16 x float> %res
}

define <16 x float> @test_x86_vcvtph2ps_512_sae(<16 x i16> %a0) {
; CHECK-LABEL: test_x86_vcvtph2ps_512_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vcvtph2ps {sae}, %ymm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
  ret <16 x float> %res
}

define <16 x float> @test_x86_vcvtph2ps_512_rrk(<16 x i16> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_x86_vcvtph2ps_512_rrk:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtph2ps %ymm0, %zmm1 {%k1}
; X64-NEXT: vmovaps %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_x86_vcvtph2ps_512_rrk:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvtph2ps %ymm0, %zmm1 {%k1}
; X86-NEXT: vmovaps %zmm1, %zmm0
; X86-NEXT: retl
  %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> %a1, i16 %mask, i32 4)
  ret <16 x float> %res
}

define <16 x float> @test_x86_vcvtph2ps_512_sae_rrkz(<16 x i16> %a0, i16 %mask) {
; X64-LABEL: test_x86_vcvtph2ps_512_sae_rrkz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtph2ps {sae}, %ymm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_x86_vcvtph2ps_512_sae_rrkz:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvtph2ps {sae}, %ymm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 %mask, i32 8)
  ret <16 x float> %res
}

define <16 x float> @test_x86_vcvtph2ps_512_rrkz(<16 x i16> %a0, i16 %mask) {
; X64-LABEL: test_x86_vcvtph2ps_512_rrkz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtph2ps %ymm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_x86_vcvtph2ps_512_rrkz:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvtph2ps %ymm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
  %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 %mask, i32 4)
  ret <16 x float> %res
}

declare <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16>, <16 x float>, i16, i32) nounwind readonly

define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16 %mask, <16 x i16>* %dst) {
; X64-LABEL: test_x86_vcvtps2ph_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
; X64-NEXT: vcvtps2ph $2, %zmm0, %ymm1 {%k1}
; X64-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; X64-NEXT: vcvtps2ph $2, %zmm0, (%rsi)
; X64-NEXT: vmovdqa %ymm1, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_x86_vcvtps2ph_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
; X86-NEXT: vcvtps2ph $2, %zmm0, %ymm1 {%k1}
; X86-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; X86-NEXT: vcvtps2ph $2, %zmm0, (%eax)
; X86-NEXT: vmovdqa %ymm1, %ymm0
; X86-NEXT: retl
  %res1 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 -1)
  %res2 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 %mask)
  %res3 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> %src, i16 %mask)
  store <16 x i16> %res1, <16 x i16>* %dst
  %res = add <16 x i16> %res2, %res3
  ret <16 x i16> %res
}

declare <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float>, i32, <16 x i16>, i16) nounwind readonly

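; For the full-width comparisons the third operand is the predicate
; immediate (2 = LE, 4 = NEQ) and the fourth enables {sae} when it is 8.
; The resulting k-register is moved to a GPR; the "kill" comment marks the
; implicit truncation to i16/i8.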
define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: test_cmpps:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <16 x i1> @llvm.x86.avx512.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i32 8)
  %1 = bitcast <16 x i1> %res to i16
  ret i16 %1
}
declare <16 x i1> @llvm.x86.avx512.cmp.ps.512(<16 x float>, <16 x float>, i32, i32)

define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: test_cmppd:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpneqpd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
  %res = call <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i32 4)
  %1 = bitcast <8 x i1> %res to i8
  ret i8 %1
}
declare <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double>, <8 x double>, i32, i32)

; Function Attrs: nounwind readnone

; fp min - max
define <8 x double> @test_vmaxpd(<8 x double> %a0, <8 x double> %a1) {
; CHECK-LABEL: test_vmaxpd:
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
  ret <8 x double> %1
}
declare <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double>, <8 x double>, i32)

define <8 x double> @test_vminpd(<8 x double> %a0, <8 x double> %a1) {
; CHECK-LABEL: test_vminpd:
; CHECK: # %bb.0:
; CHECK-NEXT: vminpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
  %1 = call <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
  ret <8 x double> %1
}
declare <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double>, <8 x double>, i32)

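; test_mask_store_ss builds a single-lane masked store from generic IR
; (mask the low bit, bitcast to <8 x i1>, shuffle out the low four lanes)
; and checks that it still selects the masked scalar vmovss store.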
define void @test_mask_store_ss(i8* %ptr, <4 x float> %data, i8 %mask) {
; X64-LABEL: test_mask_store_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vmovss %xmm0, (%rdi) {%k1}
; X64-NEXT: retq
;
; X86-LABEL: test_mask_store_ss:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: kmovw %ecx, %k1
; X86-NEXT: vmovss %xmm0, (%eax) {%k1}
; X86-NEXT: retl
  %1 = and i8 %mask, 1
  %2 = bitcast i8* %ptr to <4 x float>*
  %3 = bitcast i8 %1 to <8 x i1>
  %extract = shufflevector <8 x i1> %3, <8 x i1> %3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %data, <4 x float>* %2, i32 1, <4 x i1> %extract)
  ret void
}
declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>) #1

declare <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float>, <16 x float>, i32)
declare <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float>, <16 x float>, i32)
declare <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double>, <8 x double>, i32)

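; The remaining tests sweep vsubps/vmulps/vmulpd across all four embedded
; rounding modes (operand values 8 through 11), first unmasked, then
; zero-masked, then with a passthru operand, so each rounding/masking
; combination is covered.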
define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) {
|
|
; CHECK-LABEL: test_vsubps_rn:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
|
|
ret <16 x float> %1
|
|
}
|
|
|
|
define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) {
|
|
; CHECK-LABEL: test_vsubps_rd:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
|
|
ret <16 x float> %1
|
|
}
|
|
|
|
define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) {
|
|
; CHECK-LABEL: test_vsubps_ru:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
|
|
ret <16 x float> %1
|
|
}
|
|
|
|
define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) {
|
|
; CHECK-LABEL: test_vsubps_rz:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
|
|
ret <16 x float> %1
|
|
}
|
|
|
|
define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) {
|
|
; CHECK-LABEL: test_vmulps_rn:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vmulps {rn-sae}, %zmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
|
|
ret <16 x float> %1
|
|
}
|
|
|
|
define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) {
|
|
; CHECK-LABEL: test_vmulps_rd:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vmulps {rd-sae}, %zmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
|
|
ret <16 x float> %1
|
|
}
|
|
|
|
define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) {
|
|
; CHECK-LABEL: test_vmulps_ru:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vmulps {ru-sae}, %zmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
|
|
ret <16 x float> %1
|
|
}
|
|
|
|
define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) {
|
|
; CHECK-LABEL: test_vmulps_rz:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vmulps {rz-sae}, %zmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
|
|
ret <16 x float> %1
|
|
}
|
|
|
|
;; mask float
|
|
define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
|
|
; X64-LABEL: test_vmulps_mask_rn:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vmulps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_vmulps_mask_rn:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
|
|
; X86-NEXT: vmulps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
|
|
%2 = bitcast i16 %mask to <16 x i1>
|
|
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
|
|
ret <16 x float> %3
|
|
}
|
|
|
|
define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
|
|
; X64-LABEL: test_vmulps_mask_rd:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vmulps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_vmulps_mask_rd:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
|
|
; X86-NEXT: vmulps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
|
|
%2 = bitcast i16 %mask to <16 x i1>
|
|
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
|
|
ret <16 x float> %3
|
|
}
|
|
|
|
define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
|
|
; X64-LABEL: test_vmulps_mask_ru:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vmulps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_vmulps_mask_ru:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
|
|
; X86-NEXT: vmulps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
|
|
%2 = bitcast i16 %mask to <16 x i1>
|
|
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
|
|
ret <16 x float> %3
|
|
}
|
|
|
|
define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
|
|
; X64-LABEL: test_vmulps_mask_rz:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vmulps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_vmulps_mask_rz:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
|
|
; X86-NEXT: vmulps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
|
|
%2 = bitcast i16 %mask to <16 x i1>
|
|
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
|
|
ret <16 x float> %3
|
|
}
|
|
|
|
;; With Passthru value
|
|
define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
; X64-LABEL: test_vmulps_mask_passthru_rn:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmulps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_vmulps_mask_passthru_rn:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmulps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
ret <16 x float> %3
}

define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
; X64-LABEL: test_vmulps_mask_passthru_rd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmulps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_vmulps_mask_passthru_rd:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmulps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
ret <16 x float> %3
}

define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
; X64-LABEL: test_vmulps_mask_passthru_ru:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmulps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_vmulps_mask_passthru_ru:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmulps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
ret <16 x float> %3
}

define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
; X64-LABEL: test_vmulps_mask_passthru_rz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmulps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_vmulps_mask_passthru_rz:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmulps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
ret <16 x float> %3
}

;; mask double
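; Reader note: for <8 x double> the mask is an i8 bitcast to <8 x i1>; on
; X86 the byte argument is zero-extended from the stack with movzbl before
; being moved into the mask register with kmovw.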
define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
; X64-LABEL: test_vmulpd_mask_rn:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmulpd {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_vmulpd_mask_rn:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmulpd {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double> %a0, <8 x double> %a1, i32 8)
%2 = bitcast i8 %mask to <8 x i1>
%3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
ret <8 x double> %3
}

define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
; X64-LABEL: test_vmulpd_mask_rd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmulpd {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_vmulpd_mask_rd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmulpd {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double> %a0, <8 x double> %a1, i32 9)
%2 = bitcast i8 %mask to <8 x i1>
%3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
ret <8 x double> %3
}

define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
; X64-LABEL: test_vmulpd_mask_ru:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmulpd {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_vmulpd_mask_ru:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmulpd {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double> %a0, <8 x double> %a1, i32 10)
%2 = bitcast i8 %mask to <8 x i1>
%3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
ret <8 x double> %3
}

define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
; X64-LABEL: test_vmulpd_mask_rz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmulpd {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_vmulpd_mask_rz:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmulpd {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double> %a0, <8 x double> %a1, i32 11)
%2 = bitcast i8 %mask to <8 x i1>
%3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
ret <8 x double> %3
}

define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_add_round_ps_rn_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_add_round_ps_rn_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_add_round_ps_rd_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_add_round_ps_rd_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_add_round_ps_ru_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_add_round_ps_ru_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_add_round_ps_rz_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_add_round_ps_rz_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_add_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_add_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_add_round_ps_rn_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_add_round_ps_rn_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_add_round_ps_rd_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_add_round_ps_rd_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_add_round_ps_ru_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_add_round_ps_ru_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_add_round_ps_rz_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_add_round_ps_rz_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_add_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddps %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_add_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vaddps %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_rn_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddps {rn-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
ret <16 x float> %1
}

define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_rd_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddps {rd-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
ret <16 x float> %1
}

define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_ru_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddps {ru-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
ret <16 x float> %1
}

define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_rz_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddps {rz-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
ret <16 x float> %1
}

define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_add_round_ps_current:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
ret <16 x float> %1
}
declare <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float>, <16 x float>, i32)
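
; Reader note: the rounding operand of these intrinsics must be a constant;
; i32 4 (current direction) requests the MXCSR rounding mode, which is why
; those variants lower to the instruction without an embedded-rounding suffix.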
define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_sub_round_ps_rn_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsubps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_sub_round_ps_rn_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsubps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_sub_round_ps_rd_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsubps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_sub_round_ps_rd_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsubps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_sub_round_ps_ru_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsubps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_sub_round_ps_ru_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsubps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_sub_round_ps_rz_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsubps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_sub_round_ps_rz_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsubps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_sub_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vsubps %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_sub_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vsubps %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_rn_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
ret <16 x float> %1
}

define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_rd_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
ret <16 x float> %1
}

define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_ru_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
ret <16 x float> %1
}

define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_rz_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
ret <16 x float> %1
}

define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_sub_round_ps_current:
; CHECK: # %bb.0:
; CHECK-NEXT: vsubps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
ret <16 x float> %1
}

define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_div_round_ps_rn_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_div_round_ps_rn_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_div_round_ps_rd_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_div_round_ps_rd_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_div_round_ps_ru_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_div_round_ps_ru_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_div_round_ps_rz_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_div_round_ps_rz_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_div_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_div_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_div_round_ps_rn_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_div_round_ps_rn_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_div_round_ps_rd_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_div_round_ps_rd_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_div_round_ps_ru_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_div_round_ps_ru_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_div_round_ps_rz_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_div_round_ps_rz_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_div_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vdivps %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_div_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vdivps %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_rn_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vdivps {rn-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
ret <16 x float> %1
}

define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_rd_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vdivps {rd-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 9)
ret <16 x float> %1
}

define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_ru_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vdivps {ru-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 10)
ret <16 x float> %1
}

define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_rz_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vdivps {rz-sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 11)
ret <16 x float> %1
}

define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_div_round_ps_current:
; CHECK: # %bb.0:
; CHECK-NEXT: vdivps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
ret <16 x float> %1
}
declare <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float>, <16 x float>, i32)
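
; Reader note: vminps/vmaxps do not round their result, so the min/max
; intrinsics below are only exercised with i32 8 ({sae}, suppress
; exceptions) or i32 4 (normal), never with an embedded rounding mode.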
define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_min_round_ps_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vminps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_min_round_ps_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vminps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_min_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_min_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_min_round_ps_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vminps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_min_round_ps_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vminps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_min_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vminps %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_min_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vminps %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_min_round_ps_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vminps {sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
ret <16 x float> %1
}

define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_min_round_ps_current:
; CHECK: # %bb.0:
; CHECK-NEXT: vminps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
ret <16 x float> %1
}
declare <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float>, <16 x float>, i32)

define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_max_round_ps_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_max_round_ps_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmaxps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; X64-LABEL: test_mm512_maskz_max_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_maskz_max_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_max_round_ps_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_max_round_ps_sae:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmaxps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
; X64-LABEL: test_mm512_mask_max_round_ps_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxps %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mm512_mask_max_round_ps_current:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmaxps %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
%2 = bitcast i16 %mask to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
ret <16 x float> %3
}

define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_max_round_ps_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxps {sae}, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
ret <16 x float> %1
}

define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
; CHECK-LABEL: test_mm512_max_round_ps_current:
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
ret <16 x float> %1
}
declare <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float>, <16 x float>, i32)

declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
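
; Reader note: the scalar *.round intrinsics take the passthru vector, the
; i8 mask, and the rounding immediate as explicit operands; only element 0
; is computed, and the upper elements are taken from the first source.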
define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_mask_add_ss_rn:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_add_ss_rn:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
ret <4 x float> %res
}

define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_mask_add_ss_rd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_add_ss_rd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 9)
ret <4 x float> %res
}

define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_mask_add_ss_ru:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_add_ss_ru:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 10)
ret <4 x float> %res
}

define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_mask_add_ss_rz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_add_ss_rz:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 11)
ret <4 x float> %res
}

define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_mask_add_ss_current:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_add_ss_current:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
ret <4 x float> %res
}

define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; X64-LABEL: test_maskz_add_ss_rn:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_add_ss_rn:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 8)
ret <4 x float> %res
}

define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_add_ss_rn:
; CHECK: # %bb.0:
; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 8)
ret <4 x float> %res
}
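
; Reader note: in the memfold tests the scalar operand is built from a
; load, which the backend folds into the memory form of the instruction.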
define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
|
|
; X64-LABEL: test_mask_add_ss_current_memfold:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %esi, %k1
|
|
; X64-NEXT: vaddss (%rdi), %xmm0, %xmm1 {%k1}
|
|
; X64-NEXT: vmovaps %xmm1, %xmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_mask_add_ss_current_memfold:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: vaddss (%eax), %xmm0, %xmm1 {%k1}
|
|
; X86-NEXT: vmovaps %xmm1, %xmm0
|
|
; X86-NEXT: retl
|
|
%a1.val = load float, float* %a1
|
|
%a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
|
|
%a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
|
|
%a1v2 = insertelement <4 x float> %a1v1, float 0.000000e+00, i32 2
|
|
%a1v = insertelement <4 x float> %a1v2, float 0.000000e+00, i32 3
|
|
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1v, <4 x float> %a2, i8 %mask, i32 4)
|
|
ret <4 x float> %res
|
|
}
|
|
|
|
define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
|
|
; X64-LABEL: test_maskz_add_ss_current_memfold:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %esi, %k1
|
|
; X64-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_maskz_add_ss_current_memfold:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: vaddss (%eax), %xmm0, %xmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%a1.val = load float, float* %a1
|
|
%a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
|
|
%a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
|
|
%a1v2 = insertelement <4 x float> %a1v1, float 0.000000e+00, i32 2
|
|
%a1v = insertelement <4 x float> %a1v2, float 0.000000e+00, i32 3
|
|
%res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1v, <4 x float> zeroinitializer, i8 %mask, i32 4)
|
|
ret <4 x float> %res
|
|
}
|
|
|
|
declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
|
|
|
|
define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
|
|
; X64-LABEL: test_mask_add_sd_rn:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X64-NEXT: vmovapd %xmm2, %xmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_mask_add_sd_rn:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X86-NEXT: vmovapd %xmm2, %xmm0
|
|
; X86-NEXT: retl
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
|
|
; X64-LABEL: test_mask_add_sd_rd:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X64-NEXT: vmovapd %xmm2, %xmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_mask_add_sd_rd:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X86-NEXT: vmovapd %xmm2, %xmm0
|
|
; X86-NEXT: retl
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 9)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
|
|
; X64-LABEL: test_mask_add_sd_ru:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X64-NEXT: vmovapd %xmm2, %xmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_mask_add_sd_ru:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X86-NEXT: vmovapd %xmm2, %xmm0
|
|
; X86-NEXT: retl
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 10)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
|
|
; X64-LABEL: test_mask_add_sd_rz:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X64-NEXT: vmovapd %xmm2, %xmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_mask_add_sd_rz:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X86-NEXT: vmovapd %xmm2, %xmm0
|
|
; X86-NEXT: retl
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 11)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
|
|
; X64-LABEL: test_mask_add_sd_current:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X64-NEXT: vmovapd %xmm2, %xmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_mask_add_sd_current:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X86-NEXT: vmovapd %xmm2, %xmm0
|
|
; X86-NEXT: retl
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
|
|
; X64-LABEL: test_maskz_add_sd_rn:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_maskz_add_sd_rn:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 8)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
|
|
; CHECK-LABEL: test_add_sd_rn:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 8)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
|
|
; X64-LABEL: test_mask_add_sd_current_memfold:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %esi, %k1
|
|
; X64-NEXT: vaddsd (%rdi), %xmm0, %xmm1 {%k1}
|
|
; X64-NEXT: vmovapd %xmm1, %xmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_mask_add_sd_current_memfold:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: vaddsd (%eax), %xmm0, %xmm1 {%k1}
|
|
; X86-NEXT: vmovapd %xmm1, %xmm0
|
|
; X86-NEXT: retl
|
|
%a1.val = load double, double* %a1
|
|
%a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
|
|
%a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1v, <2 x double> %a2, i8 %mask, i32 4)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
|
|
; X64-LABEL: test_maskz_add_sd_current_memfold:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %esi, %k1
|
|
; X64-NEXT: vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_maskz_add_sd_current_memfold:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: vaddsd (%eax), %xmm0, %xmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%a1.val = load double, double* %a1
|
|
%a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
|
|
%a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
|
|
%res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1v, <2 x double> zeroinitializer, i8 %mask, i32 4)
|
|
ret <2 x double> %res
|
|
}
|
|
|
|
declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
|
|
|
|
define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
|
|
; X64-LABEL: test_mask_max_ss_sae:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X64-NEXT: vmovaps %xmm2, %xmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_mask_max_ss_sae:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
|
|
; X86-NEXT: vmovaps %xmm2, %xmm0
|
|
; X86-NEXT: retl
|
|
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
|
|
ret <4 x float> %res
|
|
}
|
|
|
|
define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
|
|
; X64-LABEL: test_maskz_max_ss_sae:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_maskz_max_ss_sae:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 8)
|
|
ret <4 x float> %res
|
|
}
|
|
|
|
define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) {
|
|
; CHECK-LABEL: test_max_ss_sae:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 8)
ret <4 x float> %res
}

define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_mask_max_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxss %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_max_ss:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
ret <4 x float> %res
}

define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; X64-LABEL: test_maskz_max_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_max_ss:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 4)
ret <4 x float> %res
}

define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_max_ss:
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 4)
ret <4 x float> %res
}

define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_mask_max_ss_memfold:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vmaxss (%rdi), %xmm0, %xmm1 {%k1}
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_max_ss_memfold:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmaxss (%eax), %xmm0, %xmm1 {%k1}
; X86-NEXT: vmovaps %xmm1, %xmm0
; X86-NEXT: retl
%a1.val = load float, float* %a1
%a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
%a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
%a1v2 = insertelement <4 x float> %a1v1, float 0.000000e+00, i32 2
%a1v = insertelement <4 x float> %a1v2, float 0.000000e+00, i32 3
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1v, <4 x float> %a2, i8 %mask, i32 4)
ret <4 x float> %res
}

define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
; X64-LABEL: test_maskz_max_ss_memfold:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_max_ss_memfold:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmaxss (%eax), %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: retl
%a1.val = load float, float* %a1
%a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
%a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
%a1v2 = insertelement <4 x float> %a1v1, float 0.000000e+00, i32 2
%a1v = insertelement <4 x float> %a1v2, float 0.000000e+00, i32 3
%res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1v, <4 x float> zeroinitializer, i8 %mask, i32 4)
ret <4 x float> %res
}
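
; The vmaxsd tests below repeat the vmaxss patterns above for the
; <2 x double> scalar intrinsic.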
declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone

define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; X64-LABEL: test_mask_max_sd_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovapd %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_max_sd_sae:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovapd %xmm2, %xmm0
; X86-NEXT: retl
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
ret <2 x double> %res
}

define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; X64-LABEL: test_maskz_max_sd_sae:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_max_sd_sae:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: retl
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 8)
ret <2 x double> %res
}

define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_max_sd_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 8)
ret <2 x double> %res
}

define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; X64-LABEL: test_mask_max_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vmovapd %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_max_sd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vmovapd %xmm2, %xmm0
; X86-NEXT: retl
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
ret <2 x double> %res
}

define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; X64-LABEL: test_maskz_max_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_max_sd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: retl
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 4)
ret <2 x double> %res
}

define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_max_sd:
; CHECK: # %bb.0:
; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 4)
ret <2 x double> %res
}

define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
; X64-LABEL: test_mask_max_sd_memfold:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vmaxsd (%rdi), %xmm0, %xmm1 {%k1}
; X64-NEXT: vmovapd %xmm1, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_mask_max_sd_memfold:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmaxsd (%eax), %xmm0, %xmm1 {%k1}
; X86-NEXT: vmovapd %xmm1, %xmm0
; X86-NEXT: retl
%a1.val = load double, double* %a1
%a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
%a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1v, <2 x double> %a2, i8 %mask, i32 4)
ret <2 x double> %res
}

define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
; X64-LABEL: test_maskz_max_sd_memfold:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_maskz_max_sd_memfold:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmaxsd (%eax), %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: retl
%a1.val = load double, double* %a1
%a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
%a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
%res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1v, <2 x double> zeroinitializer, i8 %mask, i32 4)
ret <2 x double> %res
}
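
; The trailing i32 rounding operand in the conversion tests below follows the
; usual x86 encoding: 4 = current direction (no embedded rounding),
; 8 = {rn-sae}, 9 = {rd-sae}, 10 = {ru-sae}, 11 = {rz-sae}.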
define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) {
; X64-LABEL: test_x86_avx512_cvtsi2ss32:
; X64: # %bb.0:
; X64-NEXT: vcvtsi2ss %edi, {rz-sae}, %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_x86_avx512_cvtsi2ss32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vcvtsi2ss %eax, {rz-sae}, %xmm0, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float> %a, i32 %b, i32 11) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float>, i32, i32) nounwind readnone

define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b) {
; X64-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
; X64: # %bb.0:
; X64-NEXT: vcvtusi2ss %edi, {rd-sae}, %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vcvtusi2ss %eax, {rd-sae}, %xmm0, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 9) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}

define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, i32* %ptr) {
; X64-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: vcvtusi2ss %eax, {rd-sae}, %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl (%eax), %eax
; X86-NEXT: vcvtusi2ss %eax, {rd-sae}, %xmm0, %xmm0
; X86-NEXT: retl
%b = load i32, i32* %ptr
%res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 9) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}

define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b) {
; X64-LABEL: test_x86_avx512__mm_cvtu32_ss:
; X64: # %bb.0:
; X64-NEXT: vcvtusi2ss %edi, %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_x86_avx512__mm_cvtu32_ss:
; X86: # %bb.0:
; X86-NEXT: vcvtusi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 4) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}

define <4 x float> @test_x86_avx512__mm_cvtu32_ss_mem(<4 x float> %a, i32* %ptr) {
; X64-LABEL: test_x86_avx512__mm_cvtu32_ss_mem:
; X64: # %bb.0:
; X64-NEXT: vcvtusi2ssl (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_x86_avx512__mm_cvtu32_ss_mem:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vcvtusi2ssl (%eax), %xmm0, %xmm0
; X86-NEXT: retl
%b = load i32, i32* %ptr
%res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 4) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float>, i32, i32) nounwind readnone
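
; The 512-bit vpermi2var/vpermt2var tests below cover merge-masking (where the
; index vector doubles as the passthru operand), and further down zero-masking
; plus memory and broadcast operand folds.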
declare <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>)

define <16 x i32>@test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_vpermi2var_d_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vmovdqa64 %zmm1, %zmm3
; X64-NEXT: vpermi2d (%rdi), %zmm0, %zmm3 {%k1}
; X64-NEXT: vpermt2d %zmm2, %zmm1, %zmm0
; X64-NEXT: vpaddd %zmm0, %zmm3, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_vpermi2var_d_512:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmovdqa64 %zmm1, %zmm3
; X86-NEXT: vpermi2d (%eax), %zmm0, %zmm3 {%k1}
; X86-NEXT: vpermt2d %zmm2, %zmm1, %zmm0
; X86-NEXT: vpaddd %zmm0, %zmm3, %zmm0
; X86-NEXT: retl
%x2 = load <16 x i32>, <16 x i32>* %x2p
%1 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
%2 = bitcast i16 %x3 to <16 x i1>
%3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x1
%4 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4)
%res2 = add <16 x i32> %3, %4
ret <16 x i32> %res2
}

declare <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double>, <8 x i64>, <8 x double>)

define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_512:
; X64: # %bb.0:
; X64-NEXT: vmovapd %zmm0, %zmm3
; X64-NEXT: vpermt2pd %zmm2, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermi2pd %zmm2, %zmm0, %zmm1 {%k1}
; X64-NEXT: vaddpd %zmm3, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_512:
; X86: # %bb.0:
; X86-NEXT: vmovapd %zmm0, %zmm3
; X86-NEXT: vpermt2pd %zmm2, %zmm1, %zmm3
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpermi2pd %zmm2, %zmm0, %zmm1 {%k1}
; X86-NEXT: vaddpd %zmm3, %zmm1, %zmm0
; X86-NEXT: retl
%1 = call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2)
%2 = bitcast <8 x i64> %x1 to <8 x double>
%3 = bitcast i8 %x3 to <8 x i1>
%4 = select <8 x i1> %3, <8 x double> %1, <8 x double> %2
%5 = call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2)
%6 = bitcast <8 x i64> %x1 to <8 x double>
%res2 = fadd <8 x double> %4, %5
ret <8 x double> %res2
}

declare <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float>, <16 x i32>, <16 x float>)

define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_512:
; X64: # %bb.0:
; X64-NEXT: vmovaps %zmm0, %zmm3
; X64-NEXT: vpermt2ps %zmm2, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
; X64-NEXT: vaddps %zmm3, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_512:
; X86: # %bb.0:
; X86-NEXT: vmovaps %zmm0, %zmm3
; X86-NEXT: vpermt2ps %zmm2, %zmm1, %zmm3
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
; X86-NEXT: vaddps %zmm3, %zmm1, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2)
%2 = bitcast <16 x i32> %x1 to <16 x float>
%3 = bitcast i16 %x3 to <16 x i1>
%4 = select <16 x i1> %3, <16 x float> %1, <16 x float> %2
%5 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2)
%6 = bitcast <16 x i32> %x1 to <16 x float>
%res2 = fadd <16 x float> %4, %5
ret <16 x float> %res2
}

declare <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>)

define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_vpermi2var_q_512:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 %zmm0, %zmm3
; X64-NEXT: vpermt2q %zmm2, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermi2q %zmm2, %zmm0, %zmm1 {%k1}
; X64-NEXT: vpaddq %zmm3, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_vpermi2var_q_512:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 %zmm0, %zmm3
; X86-NEXT: vpermt2q %zmm2, %zmm1, %zmm3
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpermi2q %zmm2, %zmm0, %zmm1 {%k1}
; X86-NEXT: vpaddq %zmm3, %zmm1, %zmm0
; X86-NEXT: retl
%1 = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2)
%2 = bitcast i8 %x3 to <8 x i1>
%3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x1
%4 = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2)
%res2 = add <8 x i64> %3, %4
ret <8 x i64> %res2
}

define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vmovdqa64 %zmm1, %zmm2
; X64-NEXT: vpermt2d (%rdi), %zmm0, %zmm2 {%k1} {z}
; X64-NEXT: vpermt2d %zmm1, %zmm0, %zmm1
; X64-NEXT: vpaddd %zmm1, %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmovdqa64 %zmm1, %zmm2
; X86-NEXT: vpermt2d (%eax), %zmm0, %zmm2 {%k1} {z}
; X86-NEXT: vpermt2d %zmm1, %zmm0, %zmm1
; X86-NEXT: vpaddd %zmm1, %zmm2, %zmm0
; X86-NEXT: retl
%x2 = load <16 x i32>, <16 x i32>* %x2p
%1 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2)
%2 = bitcast i16 %x3 to <16 x i1>
%3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> zeroinitializer
%4 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x1)
%res2 = add <16 x i32> %3, %4
ret <16 x i32> %res2
}

define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, double* %x2ptr, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vmovapd %zmm1, %zmm2
; X64-NEXT: vpermt2pd (%rdi){1to8}, %zmm0, %zmm2 {%k1} {z}
; X64-NEXT: vpermt2pd %zmm1, %zmm0, %zmm1
; X64-NEXT: vaddpd %zmm1, %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: kmovw %ecx, %k1
; X86-NEXT: vmovapd %zmm1, %zmm2
; X86-NEXT: vpermt2pd (%eax){1to8}, %zmm0, %zmm2 {%k1} {z}
; X86-NEXT: vpermt2pd %zmm1, %zmm0, %zmm1
; X86-NEXT: vaddpd %zmm1, %zmm2, %zmm0
; X86-NEXT: retl
%x2s = load double, double* %x2ptr
%x2ins = insertelement <8 x double> undef, double %x2s, i32 0
%x2 = shufflevector <8 x double> %x2ins, <8 x double> undef, <8 x i32> zeroinitializer
%1 = call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %x1, <8 x i64> %x0, <8 x double> %x2)
%2 = bitcast i8 %x3 to <8 x i1>
%3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
%4 = call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %x1, <8 x i64> %x0, <8 x double> %x1)
%res2 = fadd <8 x double> %3, %4
ret <8 x double> %res2
}

define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
; X64: # %bb.0:
; X64-NEXT: vmovaps %zmm1, %zmm3
; X64-NEXT: vpermt2ps %zmm2, %zmm0, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermt2ps %zmm2, %zmm0, %zmm1 {%k1} {z}
; X64-NEXT: vaddps %zmm3, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
; X86: # %bb.0:
; X86-NEXT: vmovaps %zmm1, %zmm3
; X86-NEXT: vpermt2ps %zmm2, %zmm0, %zmm3
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermt2ps %zmm2, %zmm0, %zmm1 {%k1} {z}
; X86-NEXT: vaddps %zmm3, %zmm1, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x1, <16 x i32> %x0, <16 x float> %x2)
%2 = bitcast i16 %x3 to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
%4 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x1, <16 x i32> %x0, <16 x float> %x2)
%res2 = fadd <16 x float> %3, %4
ret <16 x float> %res2
}

define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 %zmm1, %zmm3
; X64-NEXT: vpermt2q %zmm2, %zmm0, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermt2q %zmm2, %zmm0, %zmm1 {%k1} {z}
; X64-NEXT: vpaddq %zmm3, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 %zmm1, %zmm3
; X86-NEXT: vpermt2q %zmm2, %zmm0, %zmm3
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpermt2q %zmm2, %zmm0, %zmm1 {%k1} {z}
; X86-NEXT: vpaddq %zmm3, %zmm1, %zmm0
; X86-NEXT: retl
%1 = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %x1, <8 x i64> %x0, <8 x i64> %x2)
%2 = bitcast i8 %x3 to <8 x i1>
%3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> zeroinitializer
%4 = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %x1, <8 x i64> %x0, <8 x i64> %x2)
%res2 = add <8 x i64> %3, %4
ret <8 x i64> %res2
}

define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_vpermt2var_d_512:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 %zmm1, %zmm3
; X64-NEXT: vpermt2d %zmm2, %zmm0, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermt2d %zmm2, %zmm0, %zmm1 {%k1}
; X64-NEXT: vpaddd %zmm3, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_vpermt2var_d_512:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 %zmm1, %zmm3
; X86-NEXT: vpermt2d %zmm2, %zmm0, %zmm3
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermt2d %zmm2, %zmm0, %zmm1 {%k1}
; X86-NEXT: vpaddd %zmm3, %zmm1, %zmm0
; X86-NEXT: retl
%1 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2)
%2 = bitcast i16 %x3 to <16 x i1>
%3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x1
%4 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2)
%res2 = add <16 x i32> %3, %4
ret <16 x i32> %res2
}
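
; vscalefpd/vscalefps below: masked calls use {rz-sae}/{ru-sae} embedded
; rounding, the unmasked calls use {rn-sae}.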
declare <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
define <8 x double>@test_int_x86_avx512_mask_scalef_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_scalef_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vscalefpd {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vscalefpd {rn-sae}, %zmm1, %zmm0, %zmm0
; X64-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_scalef_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vscalefpd {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vscalefpd {rn-sae}, %zmm1, %zmm0, %zmm0
; X86-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; X86-NEXT: retl
%res = call <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 11)
%res1 = call <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 8)
%res2 = fadd <8 x double> %res, %res1
ret <8 x double> %res2
}

declare <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
define <16 x float>@test_int_x86_avx512_mask_scalef_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_scalef_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vscalefps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vscalefps {rn-sae}, %zmm1, %zmm0, %zmm0
; X64-NEXT: vaddps %zmm0, %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_scalef_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vscalefps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vscalefps {rn-sae}, %zmm1, %zmm0, %zmm0
; X86-NEXT: vaddps %zmm0, %zmm2, %zmm0
; X86-NEXT: retl
%res = call <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 10)
%res1 = call <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 8)
%res2 = fadd <16 x float> %res, %res1
ret <16 x float> %res2
}
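
; The vpmov* truncation tests below cover plain truncate (vpmov), signed
; saturate (vpmovs), and unsigned saturate (vpmovus) from qword and dword
; lanes, each with unmasked, merge-masked, zero-masked, and memory-store
; forms.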

declare <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64>, <16 x i8>, i8)

define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovqb %zmm0, %xmm2
; X64-NEXT: vpmovqb %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovqb %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovqb %zmm0, %xmm2
; X86-NEXT: vpmovqb %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovqb %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
%res1 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2)
%res2 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> %x0, <16 x i8> zeroinitializer, i8 %x2)
%res3 = add <16 x i8> %res0, %res1
%res4 = add <16 x i8> %res3, %res2
ret <16 x i8> %res4
}

declare void @llvm.x86.avx512.mask.pmov.qb.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmov_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovqb %zmm0, (%rdi)
; X64-NEXT: vpmovqb %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovqb %zmm0, (%eax)
; X86-NEXT: vpmovqb %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmov.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmov.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}

declare <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64>, <16 x i8>, i8)

define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovsqb %zmm0, %xmm2
; X64-NEXT: vpmovsqb %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovsqb %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovsqb %zmm0, %xmm2
; X86-NEXT: vpmovsqb %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovsqb %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
%res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2)
%res2 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64> %x0, <16 x i8> zeroinitializer, i8 %x2)
%res3 = add <16 x i8> %res0, %res1
%res4 = add <16 x i8> %res3, %res2
ret <16 x i8> %res4
}

declare void @llvm.x86.avx512.mask.pmovs.qb.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmovs_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovsqb %zmm0, (%rdi)
; X64-NEXT: vpmovsqb %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsqb %zmm0, (%eax)
; X86-NEXT: vpmovsqb %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovs.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmovs.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}

declare <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64>, <16 x i8>, i8)

define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovusqb %zmm0, %xmm2
; X64-NEXT: vpmovusqb %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovusqb %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovusqb %zmm0, %xmm2
; X86-NEXT: vpmovusqb %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovusqb %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
%res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2)
%res2 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64> %x0, <16 x i8> zeroinitializer, i8 %x2)
%res3 = add <16 x i8> %res0, %res1
%res4 = add <16 x i8> %res3, %res2
ret <16 x i8> %res4
}

declare void @llvm.x86.avx512.mask.pmovus.qb.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmovus_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovusqb %zmm0, (%rdi)
; X64-NEXT: vpmovusqb %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovusqb %zmm0, (%eax)
; X86-NEXT: vpmovusqb %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovus.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmovus.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}

declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64>, <8 x i16>, i8)

define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovqw %zmm0, %xmm2
; X64-NEXT: vpmovqw %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovqw %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovqw %zmm0, %xmm2
; X86-NEXT: vpmovqw %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovqw %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2)
%res2 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> %x0, <8 x i16> zeroinitializer, i8 %x2)
%res3 = add <8 x i16> %res0, %res1
%res4 = add <8 x i16> %res3, %res2
ret <8 x i16> %res4
}

declare void @llvm.x86.avx512.mask.pmov.qw.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmov_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovqw %zmm0, (%rdi)
; X64-NEXT: vpmovqw %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovqw %zmm0, (%eax)
; X86-NEXT: vpmovqw %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmov.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmov.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}

declare <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64>, <8 x i16>, i8)

define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovsqw %zmm0, %xmm2
; X64-NEXT: vpmovsqw %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovsqw %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovsqw %zmm0, %xmm2
; X86-NEXT: vpmovsqw %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovsqw %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2)
%res2 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64> %x0, <8 x i16> zeroinitializer, i8 %x2)
%res3 = add <8 x i16> %res0, %res1
%res4 = add <8 x i16> %res3, %res2
ret <8 x i16> %res4
}

declare void @llvm.x86.avx512.mask.pmovs.qw.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmovs_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovsqw %zmm0, (%rdi)
; X64-NEXT: vpmovsqw %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsqw %zmm0, (%eax)
; X86-NEXT: vpmovsqw %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovs.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmovs.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}

declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64>, <8 x i16>, i8)

define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovusqw %zmm0, %xmm2
; X64-NEXT: vpmovusqw %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovusqw %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovusqw %zmm0, %xmm2
; X86-NEXT: vpmovusqw %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovusqw %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2)
%res2 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %x0, <8 x i16> zeroinitializer, i8 %x2)
%res3 = add <8 x i16> %res0, %res1
%res4 = add <8 x i16> %res3, %res2
ret <8 x i16> %res4
}

declare void @llvm.x86.avx512.mask.pmovus.qw.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmovus_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovusqw %zmm0, (%rdi)
; X64-NEXT: vpmovusqw %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovusqw %zmm0, (%eax)
; X86-NEXT: vpmovusqw %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovus.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmovus.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}
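
; Note: the qd register-form test below is expressed with plain IR trunc and
; select rather than an intrinsic, so there is no declare for it; only the
; memory-store form keeps an intrinsic declare.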

define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmov_qd_512:
; X64: # %bb.0:
; X64-NEXT: vpmovqd %zmm0, %ymm2
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovqd %zmm0, %ymm1 {%k1}
; X64-NEXT: vpmovqd %zmm0, %ymm0 {%k1} {z}
; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X64-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmov_qd_512:
; X86: # %bb.0:
; X86-NEXT: vpmovqd %zmm0, %ymm2
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovqd %zmm0, %ymm1 {%k1}
; X86-NEXT: vpmovqd %zmm0, %ymm0 {%k1} {z}
; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X86-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; X86-NEXT: retl
%1 = trunc <8 x i64> %x0 to <8 x i32>
%2 = trunc <8 x i64> %x0 to <8 x i32>
%3 = bitcast i8 %x2 to <8 x i1>
%4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %x1
%5 = trunc <8 x i64> %x0 to <8 x i32>
%6 = bitcast i8 %x2 to <8 x i1>
%7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> zeroinitializer
%res3 = add <8 x i32> %1, %4
%res4 = add <8 x i32> %res3, %7
ret <8 x i32> %res4
}

declare void @llvm.x86.avx512.mask.pmov.qd.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmov_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovqd %zmm0, (%rdi)
; X64-NEXT: vpmovqd %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovqd %zmm0, (%eax)
; X86-NEXT: vpmovqd %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmov.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmov.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}

declare <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64>, <8 x i32>, i8)

define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_qd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovsqd %zmm0, %ymm2 {%k1} {z}
; X64-NEXT: vpmovsqd %zmm0, %ymm1 {%k1}
; X64-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; X64-NEXT: vpmovsqd %zmm0, %ymm0
; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_qd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovsqd %zmm0, %ymm1 {%k1}
; X86-NEXT: vpmovsqd %zmm0, %ymm2 {%k1} {z}
; X86-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; X86-NEXT: vpmovsqd %zmm0, %ymm0
; X86-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
%res0 = call <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64> %x0, <8 x i32> %x1, i8 -1)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2)
%res2 = call <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64> %x0, <8 x i32> zeroinitializer, i8 %x2)
%res3 = add <8 x i32> %res0, %res1
%res4 = add <8 x i32> %res3, %res2
ret <8 x i32> %res4
}

declare void @llvm.x86.avx512.mask.pmovs.qd.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmovs_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovsqd %zmm0, (%rdi)
; X64-NEXT: vpmovsqd %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsqd %zmm0, (%eax)
; X86-NEXT: vpmovsqd %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovs.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmovs.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}

declare <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64>, <8 x i32>, i8)

define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_qd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovusqd %zmm0, %ymm2 {%k1} {z}
; X64-NEXT: vpmovusqd %zmm0, %ymm1 {%k1}
; X64-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; X64-NEXT: vpmovusqd %zmm0, %ymm0
; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_qd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpmovusqd %zmm0, %ymm1 {%k1}
; X86-NEXT: vpmovusqd %zmm0, %ymm2 {%k1} {z}
; X86-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; X86-NEXT: vpmovusqd %zmm0, %ymm0
; X86-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X86-NEXT: retl
%res0 = call <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64> %x0, <8 x i32> %x1, i8 -1)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2)
%res2 = call <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64> %x0, <8 x i32> zeroinitializer, i8 %x2)
%res3 = add <8 x i32> %res0, %res1
%res4 = add <8 x i32> %res3, %res2
ret <8 x i32> %res4
}

declare void @llvm.x86.avx512.mask.pmovus.qd.mem.512(i8* %ptr, <8 x i64>, i8)

define void @test_int_x86_avx512_mask_pmovus_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovusqd %zmm0, (%rdi)
; X64-NEXT: vpmovusqd %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovusqd %zmm0, (%eax)
; X86-NEXT: vpmovusqd %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovus.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
call void @llvm.x86.avx512.mask.pmovus.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
ret void
}
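
; The same truncation matrix repeats below for dword sources (db/dw); these
; use i16 masks since a 512-bit dword vector has 16 elements.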

declare <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32>, <16 x i8>, i16)

define <16 x i8>@test_int_x86_avx512_mask_pmov_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmov_db_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovdb %zmm0, %xmm2
; X64-NEXT: vpmovdb %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovdb %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmov_db_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpmovdb %zmm0, %xmm2
; X86-NEXT: vpmovdb %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovdb %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
%res1 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2)
%res2 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> %x0, <16 x i8> zeroinitializer, i16 %x2)
%res3 = add <16 x i8> %res0, %res1
%res4 = add <16 x i8> %res3, %res2
ret <16 x i8> %res4
}

declare void @llvm.x86.avx512.mask.pmov.db.mem.512(i8* %ptr, <16 x i32>, i16)

define void @test_int_x86_avx512_mask_pmov_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmov_db_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovdb %zmm0, (%rdi)
; X64-NEXT: vpmovdb %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmov_db_mem_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovdb %zmm0, (%eax)
; X86-NEXT: vpmovdb %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmov.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
call void @llvm.x86.avx512.mask.pmov.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
ret void
}

declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16)

define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovsdb %zmm0, %xmm2
; X64-NEXT: vpmovsdb %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovsdb %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpmovsdb %zmm0, %xmm2
; X86-NEXT: vpmovsdb %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovsdb %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
%res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2)
%res2 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32> %x0, <16 x i8> zeroinitializer, i16 %x2)
%res3 = add <16 x i8> %res0, %res1
%res4 = add <16 x i8> %res3, %res2
ret <16 x i8> %res4
}

declare void @llvm.x86.avx512.mask.pmovs.db.mem.512(i8* %ptr, <16 x i32>, i16)

define void @test_int_x86_avx512_mask_pmovs_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovsdb %zmm0, (%rdi)
; X64-NEXT: vpmovsdb %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsdb %zmm0, (%eax)
; X86-NEXT: vpmovsdb %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovs.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
call void @llvm.x86.avx512.mask.pmovs.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
ret void
}

declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32>, <16 x i8>, i16)

define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovusdb %zmm0, %xmm2
; X64-NEXT: vpmovusdb %zmm0, %xmm1 {%k1}
; X64-NEXT: vpmovusdb %zmm0, %xmm0 {%k1} {z}
; X64-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X64-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpmovusdb %zmm0, %xmm2
; X86-NEXT: vpmovusdb %zmm0, %xmm1 {%k1}
; X86-NEXT: vpmovusdb %zmm0, %xmm0 {%k1} {z}
; X86-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X86-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
%res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
%res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2)
%res2 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %x0, <16 x i8> zeroinitializer, i16 %x2)
%res3 = add <16 x i8> %res0, %res1
%res4 = add <16 x i8> %res3, %res2
ret <16 x i8> %res4
}

declare void @llvm.x86.avx512.mask.pmovus.db.mem.512(i8* %ptr, <16 x i32>, i16)

define void @test_int_x86_avx512_mask_pmovus_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovusdb %zmm0, (%rdi)
; X64-NEXT: vpmovusdb %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovusdb %zmm0, (%eax)
; X86-NEXT: vpmovusdb %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovus.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
call void @llvm.x86.avx512.mask.pmovus.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
ret void
}
|
|
|
|
declare <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32>, <16 x i16>, i16)
|
|
|
|
define <16 x i16>@test_int_x86_avx512_mask_pmov_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
|
|
; X64-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vpmovdw %zmm0, %ymm2
|
|
; X64-NEXT: vpmovdw %zmm0, %ymm1 {%k1}
|
|
; X64-NEXT: vpmovdw %zmm0, %ymm0 {%k1} {z}
|
|
; X64-NEXT: vpaddw %ymm0, %ymm1, %ymm0
|
|
; X64-NEXT: vpaddw %ymm0, %ymm2, %ymm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
|
|
; X86-NEXT: vpmovdw %zmm0, %ymm2
|
|
; X86-NEXT: vpmovdw %zmm0, %ymm1 {%k1}
|
|
; X86-NEXT: vpmovdw %zmm0, %ymm0 {%k1} {z}
|
|
; X86-NEXT: vpaddw %ymm0, %ymm1, %ymm0
|
|
; X86-NEXT: vpaddw %ymm0, %ymm2, %ymm0
|
|
; X86-NEXT: retl
|
|
%res0 = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
|
|
%res1 = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
|
|
%res2 = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> %x0, <16 x i16> zeroinitializer, i16 %x2)
|
|
%res3 = add <16 x i16> %res0, %res1
|
|
%res4 = add <16 x i16> %res3, %res2
|
|
ret <16 x i16> %res4
|
|
}
|
|
|
|
declare void @llvm.x86.avx512.mask.pmov.dw.mem.512(i8* %ptr, <16 x i32>, i16)
|
|
|
|
define void @test_int_x86_avx512_mask_pmov_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
|
|
; X64-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_512:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %esi, %k1
|
|
; X64-NEXT: vpmovdw %zmm0, (%rdi)
|
|
; X64-NEXT: vpmovdw %zmm0, (%rdi) {%k1}
|
|
; X64-NEXT: vzeroupper
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_512:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
|
|
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: vpmovdw %zmm0, (%eax)
|
|
; X86-NEXT: vpmovdw %zmm0, (%eax) {%k1}
|
|
; X86-NEXT: vzeroupper
|
|
; X86-NEXT: retl
|
|
call void @llvm.x86.avx512.mask.pmov.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
|
|
call void @llvm.x86.avx512.mask.pmov.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
|
|
ret void
|
|
}
|
|
|
|
declare <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32>, <16 x i16>, i16)

define <16 x i16>@test_int_x86_avx512_mask_pmovs_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovsdw %zmm0, %ymm2
; X64-NEXT: vpmovsdw %zmm0, %ymm1 {%k1}
; X64-NEXT: vpmovsdw %zmm0, %ymm0 {%k1} {z}
; X64-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; X64-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpmovsdw %zmm0, %ymm2
; X86-NEXT: vpmovsdw %zmm0, %ymm1 {%k1}
; X86-NEXT: vpmovsdw %zmm0, %ymm0 {%k1} {z}
; X86-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; X86-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; X86-NEXT: retl
%res0 = call <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
%res2 = call <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32> %x0, <16 x i16> zeroinitializer, i16 %x2)
%res3 = add <16 x i16> %res0, %res1
%res4 = add <16 x i16> %res3, %res2
ret <16 x i16> %res4
}

declare void @llvm.x86.avx512.mask.pmovs.dw.mem.512(i8* %ptr, <16 x i32>, i16)

define void @test_int_x86_avx512_mask_pmovs_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovsdw %zmm0, (%rdi)
; X64-NEXT: vpmovsdw %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovsdw %zmm0, (%eax)
; X86-NEXT: vpmovsdw %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovs.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
call void @llvm.x86.avx512.mask.pmovs.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
ret void
}

declare <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32>, <16 x i16>, i16)

define <16 x i16>@test_int_x86_avx512_mask_pmovus_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpmovusdw %zmm0, %ymm2
; X64-NEXT: vpmovusdw %zmm0, %ymm1 {%k1}
; X64-NEXT: vpmovusdw %zmm0, %ymm0 {%k1} {z}
; X64-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; X64-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpmovusdw %zmm0, %ymm2
; X86-NEXT: vpmovusdw %zmm0, %ymm1 {%k1}
; X86-NEXT: vpmovusdw %zmm0, %ymm0 {%k1} {z}
; X86-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; X86-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; X86-NEXT: retl
%res0 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
%res2 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %x0, <16 x i16> zeroinitializer, i16 %x2)
%res3 = add <16 x i16> %res0, %res1
%res4 = add <16 x i16> %res3, %res2
ret <16 x i16> %res4
}

declare void @llvm.x86.avx512.mask.pmovus.dw.mem.512(i8* %ptr, <16 x i32>, i16)

define void @test_int_x86_avx512_mask_pmovus_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vpmovusdw %zmm0, (%rdi)
; X64-NEXT: vpmovusdw %zmm0, (%rdi) {%k1}
; X64-NEXT: vzeroupper
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovusdw %zmm0, (%eax)
; X86-NEXT: vpmovusdw %zmm0, (%eax) {%k1}
; X86-NEXT: vzeroupper
; X86-NEXT: retl
call void @llvm.x86.avx512.mask.pmovus.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
call void @llvm.x86.avx512.mask.pmovus.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
ret void
}
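
; Conversion tests with explicit rounding operands: i32 4 selects the current
; rounding mode, 8 and 10 request {rn-sae} and {ru-sae}, and exact or
; truncating conversions use plain {sae}.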
declare <16 x float> @llvm.x86.avx512.sitofp.round.v16f32.v16i32(<16 x i32>, i32)

define <16 x float>@test_int_x86_avx512_mask_cvt_dq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtdq2ps %zmm0, %zmm1 {%k1}
; X64-NEXT: vcvtdq2ps {rn-sae}, %zmm0, %zmm0
; X64-NEXT: vaddps %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvtdq2ps %zmm0, %zmm1 {%k1}
; X86-NEXT: vcvtdq2ps {rn-sae}, %zmm0, %zmm0
; X86-NEXT: vaddps %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%cvt = sitofp <16 x i32> %x0 to <16 x float>
%1 = bitcast i16 %x2 to <16 x i1>
%2 = select <16 x i1> %1, <16 x float> %cvt, <16 x float> %x1
%3 = call <16 x float> @llvm.x86.avx512.sitofp.round.v16f32.v16i32(<16 x i32> %x0, i32 8)
%res2 = fadd <16 x float> %2, %3
ret <16 x float> %res2
}

declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double>, <8 x i32>, i8, i32)

define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtpd2dq %zmm0, %ymm1 {%k1}
; X64-NEXT: vcvtpd2dq {rn-sae}, %zmm0, %ymm0
; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcvtpd2dq %zmm0, %ymm1 {%k1}
; X86-NEXT: vcvtpd2dq {rn-sae}, %zmm0, %ymm0
; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X86-NEXT: retl
%res = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double> %x0, <8 x i32> %x1, i8 %x2, i32 4)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double> %x0, <8 x i32> %x1, i8 -1, i32 8)
%res2 = add <8 x i32> %res, %res1
ret <8 x i32> %res2
}

declare <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double>, <8 x float>, i8, i32)

define <8 x float>@test_int_x86_avx512_mask_cvt_pd2ps_512(<8 x double> %x0, <8 x float> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtpd2ps %zmm0, %ymm1 {%k1}
; X64-NEXT: vcvtpd2ps {ru-sae}, %zmm0, %ymm0
; X64-NEXT: vaddps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcvtpd2ps %zmm0, %ymm1 {%k1}
; X86-NEXT: vcvtpd2ps {ru-sae}, %zmm0, %ymm0
; X86-NEXT: vaddps %ymm0, %ymm1, %ymm0
; X86-NEXT: retl
%res = call <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double> %x0, <8 x float> %x1, i8 %x2, i32 4)
%res1 = call <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double> %x0, <8 x float> %x1, i8 -1, i32 10)
%res2 = fadd <8 x float> %res, %res1
ret <8 x float> %res2
}

declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double>, <8 x i32>, i8, i32)

define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtpd2udq {ru-sae}, %zmm0, %ymm1 {%k1}
; X64-NEXT: vcvtpd2udq {rn-sae}, %zmm0, %ymm0
; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcvtpd2udq {ru-sae}, %zmm0, %ymm1 {%k1}
; X86-NEXT: vcvtpd2udq {rn-sae}, %zmm0, %ymm0
; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X86-NEXT: retl
%res = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double> %x0, <8 x i32> %x1, i8 %x2, i32 10)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double> %x0, <8 x i32> %x1, i8 -1, i32 8)
%res2 = add <8 x i32> %res, %res1
ret <8 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float>, <16 x i32>, i16, i32)

define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtps2dq {ru-sae}, %zmm0, %zmm1 {%k1}
; X64-NEXT: vcvtps2dq {rn-sae}, %zmm0, %zmm0
; X64-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvtps2dq {ru-sae}, %zmm0, %zmm1 {%k1}
; X86-NEXT: vcvtps2dq {rn-sae}, %zmm0, %zmm0
; X86-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%res = call <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float> %x0, <16 x i32> %x1, i16 %x2, i32 10)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float> %x0, <16 x i32> %x1, i16 -1, i32 8)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}

declare <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float>, <8 x double>, i8, i32)

define <8 x double>@test_int_x86_avx512_mask_cvt_ps2pd_512(<8 x float> %x0, <8 x double> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtps2pd %ymm0, %zmm1 {%k1}
; X64-NEXT: vcvtps2pd {sae}, %ymm0, %zmm0
; X64-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcvtps2pd %ymm0, %zmm1 {%k1}
; X86-NEXT: vcvtps2pd {sae}, %ymm0, %zmm0
; X86-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%res = call <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float> %x0, <8 x double> %x1, i8 %x2, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float> %x0, <8 x double> %x1, i8 -1, i32 8)
%res2 = fadd <8 x double> %res, %res1
ret <8 x double> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float>, <16 x i32>, i16, i32)

define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtps2udq {ru-sae}, %zmm0, %zmm1 {%k1}
; X64-NEXT: vcvtps2udq {rn-sae}, %zmm0, %zmm0
; X64-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvtps2udq {ru-sae}, %zmm0, %zmm1 {%k1}
; X86-NEXT: vcvtps2udq {rn-sae}, %zmm0, %zmm0
; X86-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%res = call <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float> %x0, <16 x i32> %x1, i16 %x2, i32 10)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float> %x0, <16 x i32> %x1, i16 -1, i32 8)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}

declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double>, <8 x i32>, i8, i32)

define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvttpd2dq %zmm0, %ymm1 {%k1}
; X64-NEXT: vcvttpd2dq {sae}, %zmm0, %ymm0
; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcvttpd2dq %zmm0, %ymm1 {%k1}
; X86-NEXT: vcvttpd2dq {sae}, %zmm0, %ymm0
; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X86-NEXT: retl
%res = call <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double> %x0, <8 x i32> %x1, i8 %x2, i32 4)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double> %x0, <8 x i32> %x1, i8 -1, i32 8)
%res2 = add <8 x i32> %res, %res1
ret <8 x i32> %res2
}

declare <16 x float> @llvm.x86.avx512.uitofp.round.v16f32.v16i32(<16 x i32>, i32)

define <16 x float>@test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtudq2ps %zmm0, %zmm1 {%k1}
; X64-NEXT: vcvtudq2ps {rn-sae}, %zmm0, %zmm0
; X64-NEXT: vaddps %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvtudq2ps %zmm0, %zmm1 {%k1}
; X86-NEXT: vcvtudq2ps {rn-sae}, %zmm0, %zmm0
; X86-NEXT: vaddps %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%cvt = uitofp <16 x i32> %x0 to <16 x float>
%1 = bitcast i16 %x2 to <16 x i1>
%2 = select <16 x i1> %1, <16 x float> %cvt, <16 x float> %x1
%3 = call <16 x float> @llvm.x86.avx512.uitofp.round.v16f32.v16i32(<16 x i32> %x0, i32 8)
%res2 = fadd <16 x float> %2, %3
ret <16 x float> %res2
}

declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double>, <8 x i32>, i8, i32)

define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvttpd2udq %zmm0, %ymm1 {%k1}
; X64-NEXT: vcvttpd2udq {sae}, %zmm0, %ymm0
; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcvttpd2udq %zmm0, %ymm1 {%k1}
; X86-NEXT: vcvttpd2udq {sae}, %zmm0, %ymm0
; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; X86-NEXT: retl
%res = call <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double> %x0, <8 x i32> %x1, i8 %x2, i32 4)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double> %x0, <8 x i32> %x1, i8 -1, i32 8)
%res2 = add <8 x i32> %res, %res1
ret <8 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float>, <16 x i32>, i16, i32)

define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvttps2dq %zmm0, %zmm1 {%k1}
; X64-NEXT: vcvttps2dq {sae}, %zmm0, %zmm0
; X64-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvttps2dq %zmm0, %zmm1 {%k1}
; X86-NEXT: vcvttps2dq {sae}, %zmm0, %zmm0
; X86-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%res = call <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float> %x0, <16 x i32> %x1, i16 %x2, i32 4)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float> %x0, <16 x i32> %x1, i16 -1, i32 8)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float>, <16 x i32>, i16, i32)

define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
; X64-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvttps2udq %zmm0, %zmm1 {%k1}
; X64-NEXT: vcvttps2udq {sae}, %zmm0, %zmm0
; X64-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vcvttps2udq %zmm0, %zmm1 {%k1}
; X86-NEXT: vcvttps2udq {sae}, %zmm0, %zmm0
; X86-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%res = call <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float> %x0, <16 x i32> %x1, i16 %x2, i32 4)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float> %x0, <16 x i32> %x1, i16 -1, i32 8)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}
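
; vgetexpss/vgetexpsd extract the exponent of the low scalar element; the
; variants below cover merge masking, zero masking, and {sae}.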
declare <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone

define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X64-LABEL: test_getexp_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %xmm2, %xmm3
; X64-NEXT: vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
; X64-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
; X64-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm5
; X64-NEXT: vaddps %xmm5, %xmm4, %xmm4
; X64-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddps %xmm2, %xmm3, %xmm0
; X64-NEXT: vaddps %xmm4, %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_getexp_ss:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovaps %xmm2, %xmm3
; X86-NEXT: vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
; X86-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddps %xmm2, %xmm3, %xmm2
; X86-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
; X86-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm2, %xmm0
; X86-NEXT: retl
%res0 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
%res2 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 8)
%res3 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 8)

%res.1 = fadd <4 x float> %res0, %res1
%res.2 = fadd <4 x float> %res2, %res3
%res = fadd <4 x float> %res.1, %res.2
ret <4 x float> %res
}

declare <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone

define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; X64-LABEL: test_getexp_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3
; X64-NEXT: vmovapd %xmm2, %xmm4
; X64-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4 {%k1}
; X64-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm5 {%k1} {z}
; X64-NEXT: vaddpd %xmm3, %xmm5, %xmm3
; X64-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddpd %xmm2, %xmm4, %xmm0
; X64-NEXT: vaddpd %xmm3, %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_getexp_sd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm2, %xmm3
; X86-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm3 {%k1}
; X86-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
; X86-NEXT: vgetexpsd %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddpd %xmm3, %xmm2, %xmm2
; X86-NEXT: vgetexpsd %xmm1, %xmm0, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm4, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm2, %xmm0
; X86-NEXT: retl
%res0 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
%res2 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 8)
%res3 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 4)

%res.1 = fadd <2 x double> %res0, %res1
%res.2 = fadd <2 x double> %res2, %res3
%res = fadd <2 x double> %res.1, %res.2
ret <2 x double> %res
}
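
; Scalar compares returning a single mask bit: the i32 immediate selects the
; predicate (2 = le, 3 = unord, 4 = neq, 5 = nlt here) and the final operand
; selects the current rounding mode (4) or {sae} (8).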
declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32)

define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_cmp_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cmp_sd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; X86-NEXT: kmovw %k0, %eax
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl

%res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
ret i8 %res4
}

define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcmplesd %xmm1, %xmm0, %k0
; X64-NEXT: kmovw %k0, %ecx
; X64-NEXT: vcmpunordsd {sae}, %xmm1, %xmm0, %k0
; X64-NEXT: kmovw %k0, %edx
; X64-NEXT: vcmpneqsd %xmm1, %xmm0, %k0 {%k1}
; X64-NEXT: kmovw %k0, %esi
; X64-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: orl %esi, %eax
; X64-NEXT: orl %edx, %eax
; X64-NEXT: orl %ecx, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcmplesd %xmm1, %xmm0, %k0
; X86-NEXT: kmovw %k0, %ecx
; X86-NEXT: vcmpunordsd {sae}, %xmm1, %xmm0, %k0
; X86-NEXT: kmovw %k0, %edx
; X86-NEXT: vcmpneqsd %xmm1, %xmm0, %k0 {%k1}
; X86-NEXT: kmovw %k0, %esi
; X86-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; X86-NEXT: kmovw %k0, %eax
; X86-NEXT: orl %esi, %eax
; X86-NEXT: orl %edx, %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl

%res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
%res2 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 3, i8 -1, i32 8)
%res3 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 4, i8 %x3, i32 4)
%res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)

%res11 = or i8 %res1, %res2
%res12 = or i8 %res3, %res4
%res13 = or i8 %res11, %res12
ret i8 %res13
}

declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)

define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_cmp_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cmp_ss:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; X86-NEXT: kmovw %k0, %eax
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl

%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
ret i8 %res2
}

define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcmpless %xmm1, %xmm0, %k0
; X64-NEXT: kmovw %k0, %ecx
; X64-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0
; X64-NEXT: kmovw %k0, %edx
; X64-NEXT: vcmpneqss %xmm1, %xmm0, %k0 {%k1}
; X64-NEXT: kmovw %k0, %esi
; X64-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k0 {%k1}
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: andl %esi, %eax
; X64-NEXT: andl %edx, %eax
; X64-NEXT: andl %ecx, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcmpless %xmm1, %xmm0, %k0
; X86-NEXT: kmovw %k0, %ecx
; X86-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0
; X86-NEXT: kmovw %k0, %edx
; X86-NEXT: vcmpneqss %xmm1, %xmm0, %k0 {%k1}
; X86-NEXT: kmovw %k0, %esi
; X86-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k0 {%k1}
; X86-NEXT: kmovw %k0, %eax
; X86-NEXT: andl %esi, %eax
; X86-NEXT: andl %edx, %eax
; X86-NEXT: andl %ecx, %eax
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
%res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
%res3 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 4, i8 %x3, i32 4)
%res4 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 5, i8 %x3, i32 8)

%res11 = and i8 %res1, %res2
%res12 = and i8 %res3, %res4
%res13 = and i8 %res11, %res12
ret i8 %res13
}
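
; vgetmant tests: the $11 immediate encodes the normalization interval and
; sign control; packed pd/ps and scalar sd/ss forms, with and without {sae}.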
declare <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double>, i32, <8 x double>, i8, i32)

define <8 x double>@test_int_x86_avx512_mask_getmant_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_getmant_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vgetmantpd $11, %zmm0, %zmm1 {%k1}
; X64-NEXT: vgetmantpd $11, {sae}, %zmm0, %zmm0
; X64-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_getmant_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vgetmantpd $11, %zmm0, %zmm1 {%k1}
; X86-NEXT: vgetmantpd $11, {sae}, %zmm0, %zmm0
; X86-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%res = call <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double> %x0, i32 11, <8 x double> %x2, i8 %x3, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double> %x0, i32 11, <8 x double> %x2, i8 -1, i32 8)
%res2 = fadd <8 x double> %res, %res1
ret <8 x double> %res2
}

declare <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float>, i32, <16 x float>, i16, i32)

define <16 x float>@test_int_x86_avx512_mask_getmant_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_getmant_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vgetmantps $11, %zmm0, %zmm1 {%k1}
; X64-NEXT: vgetmantps $11, {sae}, %zmm0, %zmm0
; X64-NEXT: vaddps %zmm0, %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_getmant_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vgetmantps $11, %zmm0, %zmm1 {%k1}
; X86-NEXT: vgetmantps $11, {sae}, %zmm0, %zmm0
; X86-NEXT: vaddps %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
%res = call <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float> %x0, i32 11, <16 x float> %x2, i16 %x3, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float> %x0, i32 11, <16 x float> %x2, i16 -1, i32 8)
%res2 = fadd <16 x float> %res, %res1
ret <16 x float> %res2
}

declare <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double>, <2 x double>, i32, <2 x double>, i8, i32)

define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_getmant_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3
; X64-NEXT: vmovapd %xmm2, %xmm4
; X64-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1}
; X64-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm5 {%k1} {z}
; X64-NEXT: vaddpd %xmm5, %xmm4, %xmm4
; X64-NEXT: vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddpd %xmm3, %xmm2, %xmm0
; X64-NEXT: vaddpd %xmm0, %xmm4, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_getmant_sd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm2, %xmm3
; X86-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3 {%k1}
; X86-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
; X86-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X86-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4
; X86-NEXT: vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddpd %xmm4, %xmm2, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%res = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 %x3, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> zeroinitializer, i8 %x3, i32 4)
%res2 = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 %x3, i32 8)
%res3 = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 -1, i32 4)
%res11 = fadd <2 x double> %res, %res1
%res12 = fadd <2 x double> %res2, %res3
%res13 = fadd <2 x double> %res11, %res12
ret <2 x double> %res13
}

declare <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float>, <4 x float>, i32, <4 x float>, i8, i32)

define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_getmant_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3
; X64-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
; X64-NEXT: vaddps %xmm4, %xmm2, %xmm2
; X64-NEXT: vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0
; X64-NEXT: vaddps %xmm3, %xmm0, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_getmant_ss:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3 {%k1} {z}
; X86-NEXT: vaddps %xmm3, %xmm2, %xmm2
; X86-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3
; X86-NEXT: vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0
; X86-NEXT: vaddps %xmm3, %xmm0, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm2, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 %x3, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> zeroinitializer, i8 %x3, i32 4)
%res2 = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 -1, i32 8)
%res3 = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 -1, i32 4)
%res11 = fadd <4 x float> %res, %res1
%res12 = fadd <4 x float> %res2, %res3
%res13 = fadd <4 x float> %res11, %res12
ret <4 x float> %res13
}
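
; vpermilvar tests: variable in-lane permutes of pd/ps vectors, including
; constant index vectors that should be decoded into shuffle comments.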
declare <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double>, <8 x i64>)

define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1)
ret <8 x double> %res
}

define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %mask) {
; X64-LABEL: test_int_x86_avx512_vpermilvar_pd_512_mask:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovapd %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_vpermilvar_pd_512_mask:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovapd %zmm2, %zmm0
; X86-NEXT: retl
%res = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1)
%mask.cast = bitcast i8 %mask to <8 x i1>
%res2 = select <8 x i1> %mask.cast, <8 x double> %res, <8 x double> %x2
ret <8 x double> %res2
}

define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_maskz(<8 x double> %x0, <8 x i64> %x1, i8 %mask) {
; X64-LABEL: test_int_x86_avx512_vpermilvar_pd_512_maskz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilpd %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_vpermilvar_pd_512_maskz:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpermilpd %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%res = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1)
%mask.cast = bitcast i8 %mask to <8 x i1>
%res2 = select <8 x i1> %mask.cast, <8 x double> %res, <8 x double> zeroinitializer
ret <8 x double> %res2
}

declare <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float>, <16 x i32>)

define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1)
ret <16 x float> %res
}

define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) {
; X64-LABEL: test_int_x86_avx512_vpermilvar_ps_512_mask:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilps %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_vpermilvar_ps_512_mask:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermilps %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x float> %res, <16 x float> %x2
ret <16 x float> %res2
}

define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) {
; X64-LABEL: test_int_x86_avx512_vpermilvar_ps_512_maskz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_vpermilvar_ps_512_maskz:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermilps %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x float> %res, <16 x float> zeroinitializer
ret <16 x float> %res2
}

; Test case to make sure we can print shuffle decode comments for constant pool loads.
define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool(<16 x float> %x0, <16 x i32> %x1) {
; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool:
; CHECK: # %bb.0:
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
ret <16 x float> %res
}

define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) {
; X64-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilps {{.*#+}} zmm2 {%k1} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; X64-NEXT: vmovaps %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermilps {{.*#+}} zmm2 {%k1} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; X86-NEXT: vmovaps %zmm2, %zmm0
; X86-NEXT: retl
%res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x float> %res, <16 x float> %x2
ret <16 x float> %res2
}

define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) {
; X64-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; X86-NEXT: retl
%res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x float> %res, <16 x float> zeroinitializer
ret <16 x float> %res2
}
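
; Scalar fp-width conversions with rounding control: vcvtss2sd is exact and
; only honors {sae}, while vcvtsd2ss takes a full rounding mode.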
declare <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double>, <4 x float>, <2 x double>, i8, i32)

define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
; X64-NEXT: vaddpd %xmm0, %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm2, %xmm0
; X86-NEXT: retl
%res = call <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double> %x0, <4 x float> %x1, <2 x double> %x2, i8 %x3, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double> %x0, <4 x float> %x1, <2 x double> %x2, i8 -1, i32 8)
%res2 = fadd <2 x double> %res, %res1
ret <2 x double> %res2
}

declare <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float>, <2 x double>, <4 x float>, i8, i32)

define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm2, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm2, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float> %x0, <2 x double> %x1, <4 x float> %x2, i8 %x3, i32 11)
%res1 = call <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float> %x0, <2 x double> %x1, <4 x float> %x2, i8 -1, i32 8)
%res2 = fadd <4 x float> %res, %res1
ret <4 x float> %res2
}
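
; vpternlogd/vpternlogq tests: the $33 immediate is the 8-bit ternary truth
; table; the masked forms merge into the first source, the maskz forms zero it.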
declare <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32)

define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 %zmm0, %zmm3
; X64-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
; X64-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 %zmm0, %zmm3
; X86-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
; X86-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; X86-NEXT: retl
%1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
%2 = bitcast i16 %x4 to <16 x i1>
%3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x0
%4 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
%res2 = add <16 x i32> %3, %4
ret <16 x i32> %res2
}

define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) {
; X64-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 %zmm0, %zmm3
; X64-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
; X64-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 %zmm0, %zmm3
; X86-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm3
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
; X86-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; X86-NEXT: retl
%1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
%2 = bitcast i16 %x4 to <16 x i1>
%3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> zeroinitializer
%4 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
%res2 = add <16 x i32> %3, %4
ret <16 x i32> %res2
}

declare <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i32)

define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 %zmm0, %zmm3
; X64-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1}
; X64-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 %zmm0, %zmm3
; X86-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1}
; X86-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; X86-NEXT: retl
%1 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
%2 = bitcast i8 %x4 to <8 x i1>
%3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x0
%4 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
%res2 = add <8 x i64> %3, %4
ret <8 x i64> %res2
}

define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) {
; X64-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 %zmm0, %zmm3
; X64-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
; X64-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 %zmm0, %zmm3
; X86-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm3
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
; X86-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; X86-NEXT: retl
%1 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
%2 = bitcast i8 %x4 to <8 x i1>
%3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> zeroinitializer
%4 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
%res2 = add <8 x i64> %3, %4
ret <8 x i64> %res2
}
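
; v(u)comi scalar compare tests returning i32, with and without {sae}; the
; predicate immediates follow the same encoding as the cmp tests above.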
define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_comi_sd_eq_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpeqsd {sae}, %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 0, i32 8)
ret i32 %res
}

define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_sd_eq_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpeq_uqsd {sae}, %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 8, i32 8)
ret i32 %res
}

define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_comi_sd_eq:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpeqsd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 0, i32 4)
ret i32 %res
}

define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_sd_eq:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpeq_uqsd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 8, i32 4)
ret i32 %res
}

define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_comi_sd_lt_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpltsd {sae}, %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 1, i32 8)
ret i32 %res
}

define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_sd_lt_sae:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpngesd {sae}, %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 9, i32 8)
ret i32 %res
}

define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_comi_sd_lt:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpltsd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 1, i32 4)
ret i32 %res
}

define i32 @test_x86_avx512_ucomi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_sd_lt:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpngesd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 9, i32 4)
ret i32 %res
}

declare i32 @llvm.x86.avx512.vcomi.sd(<2 x double>, <2 x double>, i32, i32)

define i32 @test_x86_avx512_ucomi_ss_lt(<4 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: test_x86_avx512_ucomi_ss_lt:
; CHECK: # %bb.0:
; CHECK-NEXT: vcmpngess %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ret{{[l|q]}}
%res = call i32 @llvm.x86.avx512.vcomi.ss(<4 x float> %a0, <4 x float> %a1, i32 9, i32 4)
ret i32 %res
}

declare i32 @llvm.x86.avx512.vcomi.ss(<4 x float>, <4 x float>, i32, i32)
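
; Full cross-lane permute (vpermpd/vpermq/vpermps/vpermd) tests, with merge
; and zero masking expressed via select.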
declare <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double>, <8 x i64>)

define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_permvar_df_512:
; X64: # %bb.0:
; X64-NEXT: vpermpd %zmm0, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermpd %zmm0, %zmm1, %zmm2 {%k1}
; X64-NEXT: vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z}
; X64-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; X64-NEXT: vaddpd %zmm3, %zmm0, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_permvar_df_512:
; X86: # %bb.0:
; X86-NEXT: vpermpd %zmm0, %zmm1, %zmm3
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpermpd %zmm0, %zmm1, %zmm2 {%k1}
; X86-NEXT: vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z}
; X86-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; X86-NEXT: vaddpd %zmm3, %zmm0, %zmm0
; X86-NEXT: retl
%1 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %x0, <8 x i64> %x1)
%2 = bitcast i8 %x3 to <8 x i1>
%3 = select <8 x i1> %2, <8 x double> %1, <8 x double> %x2
%4 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %x0, <8 x i64> %x1)
%5 = bitcast i8 %x3 to <8 x i1>
%6 = select <8 x i1> %5, <8 x double> %4, <8 x double> zeroinitializer
%7 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %x0, <8 x i64> %x1)
%res3 = fadd <8 x double> %3, %6
%res4 = fadd <8 x double> %res3, %7
ret <8 x double> %res4
}

declare <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64>, <8 x i64>)

define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_permvar_di_512:
; X64: # %bb.0:
; X64-NEXT: vpermq %zmm0, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermq %zmm0, %zmm1, %zmm2 {%k1}
; X64-NEXT: vpermq %zmm0, %zmm1, %zmm0 {%k1} {z}
; X64-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; X64-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_permvar_di_512:
; X86: # %bb.0:
; X86-NEXT: vpermq %zmm0, %zmm1, %zmm3
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vpermq %zmm0, %zmm1, %zmm2 {%k1}
; X86-NEXT: vpermq %zmm0, %zmm1, %zmm0 {%k1} {z}
; X86-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; X86-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %x0, <8 x i64> %x1)
%2 = bitcast i8 %x3 to <8 x i1>
%3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
%4 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %x0, <8 x i64> %x1)
%5 = bitcast i8 %x3 to <8 x i1>
%6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
%7 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %x0, <8 x i64> %x1)
%res3 = add <8 x i64> %3, %6
%res4 = add <8 x i64> %res3, %7
ret <8 x i64> %res4
}

declare <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float>, <16 x i32>)

define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_permvar_sf_512:
; X64: # %bb.0:
; X64-NEXT: vpermps %zmm0, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermps %zmm0, %zmm1, %zmm2 {%k1}
; X64-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z}
; X64-NEXT: vaddps %zmm0, %zmm2, %zmm0
; X64-NEXT: vaddps %zmm3, %zmm0, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_permvar_sf_512:
; X86: # %bb.0:
; X86-NEXT: vpermps %zmm0, %zmm1, %zmm3
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermps %zmm0, %zmm1, %zmm2 {%k1}
; X86-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z}
; X86-NEXT: vaddps %zmm0, %zmm2, %zmm0
; X86-NEXT: vaddps %zmm3, %zmm0, %zmm0
; X86-NEXT: retl
%1 = call <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float> %x0, <16 x i32> %x1)
%2 = bitcast i16 %x3 to <16 x i1>
%3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %x2
%4 = call <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float> %x0, <16 x i32> %x1)
%5 = bitcast i16 %x3 to <16 x i1>
%6 = select <16 x i1> %5, <16 x float> %4, <16 x float> zeroinitializer
%7 = call <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float> %x0, <16 x i32> %x1)
%res3 = fadd <16 x float> %3, %6
%res4 = fadd <16 x float> %res3, %7
ret <16 x float> %res4
}

declare <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32>, <16 x i32>)

define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; X64-LABEL: test_int_x86_avx512_mask_permvar_si_512:
; X64: # %bb.0:
; X64-NEXT: vpermd %zmm0, %zmm1, %zmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpermd %zmm0, %zmm1, %zmm2 {%k1}
; X64-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z}
; X64-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; X64-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_permvar_si_512:
; X86: # %bb.0:
; X86-NEXT: vpermd %zmm0, %zmm1, %zmm3
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpermd %zmm0, %zmm1, %zmm2 {%k1}
; X86-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z}
; X86-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; X86-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; X86-NEXT: retl
%1 = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> %x0, <16 x i32> %x1)
%2 = bitcast i16 %x3 to <16 x i1>
%3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
%4 = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> %x0, <16 x i32> %x1)
%5 = bitcast i16 %x3 to <16 x i1>
%6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
%7 = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> %x0, <16 x i32> %x1)
%res3 = add <16 x i32> %3, %6
%res4 = add <16 x i32> %res3, %7
ret <16 x i32> %res4
}
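
; vfixupimm tests: the immediate selects the fixup-table entry; the variants
; cover merge masking, zero masking, {sae}, and a folded memory operand.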
|
|
|
|
declare <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double>, <8 x double>, <8 x i64>, i32, i8, i32)

define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %zmm0, %zmm3
; X64-NEXT: vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
; X64-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; X64-NEXT: vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
; X64-NEXT: vaddpd %zmm4, %zmm3, %zmm3
; X64-NEXT: vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0
; X64-NEXT: vaddpd %zmm0, %zmm3, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %zmm0, %zmm3
; X86-NEXT: vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
; X86-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; X86-NEXT: vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
; X86-NEXT: vaddpd %zmm4, %zmm3, %zmm3
; X86-NEXT: vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: vaddpd %zmm0, %zmm3, %zmm0
; X86-NEXT: retl
%res = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 4, i8 %x4, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> zeroinitializer, <8 x double> %x1, <8 x i64> %x2, i32 5, i8 %x4, i32 4)
%res2 = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 -1, i32 8)
%res3 = fadd <8 x double> %res, %res1
%res4 = fadd <8 x double> %res3, %res2
ret <8 x double> %res4
}

define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512_load(<8 x double> %x0, <8 x double> %x1, <8 x i64>* %x2ptr) {
; X64-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512_load:
; X64: # %bb.0:
; X64-NEXT: vfixupimmpd $3, (%rdi), %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512_load:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vfixupimmpd $3, (%eax), %zmm1, %zmm0
; X86-NEXT: retl
%x2 = load <8 x i64>, <8 x i64>* %x2ptr
%res = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 -1, i32 4)
ret <8 x double> %res
}

declare <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double>, <8 x double>, <8 x i64>, i32, i8, i32)

define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) {
; X64-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %zmm0, %zmm3
; X64-NEXT: vfixupimmpd $3, %zmm2, %zmm1, %zmm3 {%k1} {z}
; X64-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; X64-NEXT: vmovapd %zmm0, %zmm5
; X64-NEXT: vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z}
; X64-NEXT: vaddpd %zmm5, %zmm3, %zmm3
; X64-NEXT: vfixupimmpd $2, {sae}, %zmm2, %zmm1, %zmm0
; X64-NEXT: vaddpd %zmm0, %zmm3, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_512:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %zmm0, %zmm3
; X86-NEXT: vfixupimmpd $3, %zmm2, %zmm1, %zmm3 {%k1} {z}
; X86-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; X86-NEXT: vmovapd %zmm0, %zmm5
; X86-NEXT: vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z}
; X86-NEXT: vaddpd %zmm5, %zmm3, %zmm3
; X86-NEXT: vfixupimmpd $2, {sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: vaddpd %zmm0, %zmm3, %zmm0
; X86-NEXT: retl
%res = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 %x4, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> zeroinitializer, i32 5, i8 %x4, i32 4)
%res2 = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 2, i8 -1, i32 8)
%res3 = fadd <8 x double> %res, %res1
%res4 = fadd <8 x double> %res3, %res2
ret <8 x double> %res4
}

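; The scalar fixupimm.ss/sd variants below follow the same pattern but only
; operate on element 0; the upper elements are passed through from the
; destination operand.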
declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float>, <4 x float>, <4 x i32>, i32, i8, i32)

define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %xmm0, %xmm3
; X64-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
; X64-NEXT: vxorps %xmm4, %xmm4, %xmm4
; X64-NEXT: vmovaps %xmm0, %xmm5
; X64-NEXT: vfixupimmss $5, %xmm4, %xmm1, %xmm5 {%k1}
; X64-NEXT: vaddps %xmm5, %xmm3, %xmm3
; X64-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovaps %xmm0, %xmm3
; X86-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
; X86-NEXT: vxorps %xmm4, %xmm4, %xmm4
; X86-NEXT: vmovaps %xmm0, %xmm5
; X86-NEXT: vfixupimmss $5, %xmm4, %xmm1, %xmm5 {%k1}
; X86-NEXT: vaddps %xmm5, %xmm3, %xmm3
; X86-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 4)
%res2 = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -1, i32 8)
%res3 = fadd <4 x float> %res, %res1
%res4 = fadd <4 x float> %res3, %res2
ret <4 x float> %res4
}

declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>, <4 x i32>, i32, i8, i32)

define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; X64-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %xmm0, %xmm3
; X64-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3
; X64-NEXT: vmovaps %xmm0, %xmm4
; X64-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm4 {%k1} {z}
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: vaddps %xmm0, %xmm4, %xmm0
; X64-NEXT: vaddps %xmm3, %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovaps %xmm0, %xmm3
; X86-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
; X86-NEXT: vmovaps %xmm0, %xmm4
; X86-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm4
; X86-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X86-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X86-NEXT: vaddps %xmm4, %xmm0, %xmm0
; X86-NEXT: retl
%res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 8)
%res2 = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -1, i32 4)
%res3 = fadd <4 x float> %res, %res1
%res4 = fadd <4 x float> %res3, %res2
ret <4 x float> %res4
}

declare <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float>, <16 x float>, <16 x i32>, i32, i16, i32)

define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %zmm0, %zmm3
; X64-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1}
; X64-NEXT: vxorps %xmm4, %xmm4, %xmm4
; X64-NEXT: vmovaps %zmm0, %zmm5
; X64-NEXT: vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1}
; X64-NEXT: vaddps %zmm5, %zmm3, %zmm3
; X64-NEXT: vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0
; X64-NEXT: vaddps %zmm0, %zmm3, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmovaps %zmm0, %zmm3
; X86-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1}
; X86-NEXT: vxorps %xmm4, %xmm4, %xmm4
; X86-NEXT: vmovaps %zmm0, %zmm5
; X86-NEXT: vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1}
; X86-NEXT: vaddps %zmm5, %zmm3, %zmm3
; X86-NEXT: vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: vaddps %zmm0, %zmm3, %zmm0
; X86-NEXT: retl
%res = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 %x4, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> zeroinitializer, i32 5, i16 %x4, i32 4)
%res2 = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 -1, i32 8)
%res3 = fadd <16 x float> %res, %res1
%res4 = fadd <16 x float> %res3, %res2
ret <16 x float> %res4
}

define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512_load(<16 x float> %x0, <16 x float> %x1, <16 x i32>* %x2ptr) {
; X64-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512_load:
; X64: # %bb.0:
; X64-NEXT: vfixupimmps $5, (%rdi), %zmm1, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512_load:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vfixupimmps $5, (%eax), %zmm1, %zmm0
; X86-NEXT: retl
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 -1, i32 4)
ret <16 x float> %res
}

declare <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float>, <16 x float>, <16 x i32>, i32, i16, i32)

define <16 x float>@test_int_x86_avx512_maskz_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
; X64-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %zmm0, %zmm3
; X64-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm3
; X64-NEXT: vmovaps %zmm0, %zmm4
; X64-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
; X64-NEXT: vaddps %zmm0, %zmm4, %zmm0
; X64-NEXT: vaddps %zmm3, %zmm0, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vmovaps %zmm0, %zmm3
; X86-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1} {z}
; X86-NEXT: vmovaps %zmm0, %zmm4
; X86-NEXT: vfixupimmps $5, %zmm2, %zmm1, %zmm4
; X86-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X86-NEXT: vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
; X86-NEXT: vaddps %zmm0, %zmm3, %zmm0
; X86-NEXT: vaddps %zmm4, %zmm0, %zmm0
; X86-NEXT: retl
%res = call <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 %x4, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> zeroinitializer, i32 5, i16 %x4, i32 8)
%res2 = call <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 -1, i32 4)
%res3 = fadd <16 x float> %res, %res1
%res4 = fadd <16 x float> %res3, %res2
ret <16 x float> %res4
}

declare <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double>, <2 x double>, <2 x i64>, i32, i8, i32)

define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %xmm0, %xmm3
; X64-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3
; X64-NEXT: vmovapd %xmm0, %xmm4
; X64-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm4 {%k1}
; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: vaddpd %xmm0, %xmm4, %xmm0
; X64-NEXT: vaddpd %xmm3, %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm0, %xmm3
; X86-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1}
; X86-NEXT: vmovapd %xmm0, %xmm4
; X86-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm4
; X86-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X86-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: vaddpd %xmm4, %xmm0, %xmm0
; X86-NEXT: retl
%res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8)
%res2 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 -1, i32 4)
%res3 = fadd <2 x double> %res, %res1
%res4 = fadd <2 x double> %res3, %res2
ret <2 x double> %res4
}

declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x double>, <2 x i64>, i32, i8, i32)

define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; X64-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %xmm0, %xmm3
; X64-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
; X64-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; X64-NEXT: vmovapd %xmm0, %xmm5
; X64-NEXT: vfixupimmsd $5, {sae}, %xmm4, %xmm1, %xmm5 {%k1} {z}
; X64-NEXT: vaddpd %xmm5, %xmm3, %xmm3
; X64-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm0, %xmm3
; X86-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
; X86-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; X86-NEXT: vmovapd %xmm0, %xmm5
; X86-NEXT: vfixupimmsd $5, {sae}, %xmm4, %xmm1, %xmm5 {%k1} {z}
; X86-NEXT: vaddpd %xmm5, %xmm3, %xmm3
; X86-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8)
%res2 = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 8)
%res3 = fadd <2 x double> %res, %res1
%res4 = fadd <2 x double> %res3, %res2
ret <2 x double> %res4
}

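; The scalar FMA tests below build each operation out of extractelement/insertelement
; around either the generic llvm.fma.* intrinsic (default rounding) or
; llvm.x86.avx512.vfmadd.* with an explicit rounding operand; i32 11 encodes
; round-toward-zero plus suppress-all-exceptions and shows up as {rz-sae} in the checks.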
declare double @llvm.fma.f64(double, double, double) #1
declare double @llvm.x86.avx512.vfmadd.f64(double, double, double, i32) #0

define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
; X64: # %bb.0:
; X64-NEXT: vmovapd %xmm0, %xmm3
; X64-NEXT: vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %xmm0, %xmm4
; X64-NEXT: vfmadd213sd {{.*#+}} xmm4 {%k1} = (xmm1 * xmm4) + xmm2
; X64-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X64-NEXT: vmovapd %xmm0, %xmm4
; X64-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm4
; X64-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: vaddpd %xmm0, %xmm4, %xmm0
; X64-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: vmovapd %xmm0, %xmm3
; X86-NEXT: vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm0, %xmm4
; X86-NEXT: vfmadd213sd {{.*#+}} xmm4 {%k1} = (xmm1 * xmm4) + xmm2
; X86-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X86-NEXT: vmovapd %xmm0, %xmm4
; X86-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm4
; X86-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT: vaddpd %xmm0, %xmm4, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = extractelement <2 x double> %x0, i64 0
%2 = extractelement <2 x double> %x1, i64 0
%3 = extractelement <2 x double> %x2, i64 0
%4 = call double @llvm.fma.f64(double %1, double %2, double %3)
%5 = insertelement <2 x double> %x0, double %4, i64 0
%6 = extractelement <2 x double> %x0, i64 0
%7 = extractelement <2 x double> %x1, i64 0
%8 = extractelement <2 x double> %x2, i64 0
%9 = call double @llvm.fma.f64(double %6, double %7, double %8)
%10 = bitcast i8 %x3 to <8 x i1>
%11 = extractelement <8 x i1> %10, i64 0
%12 = select i1 %11, double %9, double %6
%13 = insertelement <2 x double> %x0, double %12, i64 0
%14 = extractelement <2 x double> %x0, i64 0
%15 = extractelement <2 x double> %x1, i64 0
%16 = extractelement <2 x double> %x2, i64 0
%17 = call double @llvm.x86.avx512.vfmadd.f64(double %14, double %15, double %16, i32 11)
%18 = insertelement <2 x double> %x0, double %17, i64 0
%19 = extractelement <2 x double> %x0, i64 0
%20 = extractelement <2 x double> %x1, i64 0
%21 = extractelement <2 x double> %x2, i64 0
%22 = call double @llvm.x86.avx512.vfmadd.f64(double %19, double %20, double %21, i32 11)
%23 = bitcast i8 %x3 to <8 x i1>
%24 = extractelement <8 x i1> %23, i64 0
%25 = select i1 %24, double %22, double %19
%26 = insertelement <2 x double> %x0, double %25, i64 0
%res4 = fadd <2 x double> %5, %13
%res5 = fadd <2 x double> %18, %26
%res6 = fadd <2 x double> %res4, %res5
ret <2 x double> %res6
}

define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm0, %xmm3
; X64-NEXT: vfmadd213ss {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %xmm0, %xmm4
; X64-NEXT: vfmadd213ss {{.*#+}} xmm4 {%k1} = (xmm1 * xmm4) + xmm2
; X64-NEXT: vaddps %xmm4, %xmm3, %xmm3
; X64-NEXT: vmovaps %xmm0, %xmm4
; X64-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm4
; X64-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X64-NEXT: vaddps %xmm0, %xmm4, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: vmovaps %xmm0, %xmm3
; X86-NEXT: vfmadd213ss {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovaps %xmm0, %xmm4
; X86-NEXT: vfmadd213ss {{.*#+}} xmm4 {%k1} = (xmm1 * xmm4) + xmm2
; X86-NEXT: vaddps %xmm4, %xmm3, %xmm3
; X86-NEXT: vmovaps %xmm0, %xmm4
; X86-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm4
; X86-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
; X86-NEXT: vaddps %xmm0, %xmm4, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = extractelement <4 x float> %x0, i64 0
%2 = extractelement <4 x float> %x1, i64 0
%3 = extractelement <4 x float> %x2, i64 0
%4 = call float @llvm.fma.f32(float %1, float %2, float %3)
%5 = insertelement <4 x float> %x0, float %4, i64 0
%6 = extractelement <4 x float> %x0, i64 0
%7 = extractelement <4 x float> %x1, i64 0
%8 = extractelement <4 x float> %x2, i64 0
%9 = call float @llvm.fma.f32(float %6, float %7, float %8)
%10 = bitcast i8 %x3 to <8 x i1>
%11 = extractelement <8 x i1> %10, i64 0
%12 = select i1 %11, float %9, float %6
%13 = insertelement <4 x float> %x0, float %12, i64 0
%14 = extractelement <4 x float> %x0, i64 0
%15 = extractelement <4 x float> %x1, i64 0
%16 = extractelement <4 x float> %x2, i64 0
%17 = call float @llvm.x86.avx512.vfmadd.f32(float %14, float %15, float %16, i32 11)
%18 = insertelement <4 x float> %x0, float %17, i64 0
%19 = extractelement <4 x float> %x0, i64 0
%20 = extractelement <4 x float> %x1, i64 0
%21 = extractelement <4 x float> %x2, i64 0
%22 = call float @llvm.x86.avx512.vfmadd.f32(float %19, float %20, float %21, i32 11)
%23 = bitcast i8 %x3 to <8 x i1>
%24 = extractelement <8 x i1> %23, i64 0
%25 = select i1 %24, float %22, float %19
%26 = insertelement <4 x float> %x0, float %25, i64 0
%res4 = fadd <4 x float> %5, %13
%res5 = fadd <4 x float> %18, %26
%res6 = fadd <4 x float> %res4, %res5
ret <4 x float> %res6
}

define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %xmm0, %xmm3
; X64-NEXT: vfmadd213sd {{.*#+}} xmm3 {%k1} {z} = (xmm1 * xmm3) + xmm2
; X64-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X64-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm0, %xmm3
; X86-NEXT: vfmadd213sd {{.*#+}} xmm3 {%k1} {z} = (xmm1 * xmm3) + xmm2
; X86-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = extractelement <2 x double> %x0, i64 0
%2 = extractelement <2 x double> %x1, i64 0
%3 = extractelement <2 x double> %x2, i64 0
%4 = call double @llvm.fma.f64(double %1, double %2, double %3)
%5 = bitcast i8 %x3 to <8 x i1>
%6 = extractelement <8 x i1> %5, i64 0
%7 = select i1 %6, double %4, double 0.000000e+00
%8 = insertelement <2 x double> %x0, double %7, i64 0
%9 = extractelement <2 x double> %x0, i64 0
%10 = extractelement <2 x double> %x1, i64 0
%11 = extractelement <2 x double> %x2, i64 0
%12 = call double @llvm.x86.avx512.vfmadd.f64(double %9, double %10, double %11, i32 11)
%13 = bitcast i8 %x3 to <8 x i1>
%14 = extractelement <8 x i1> %13, i64 0
%15 = select i1 %14, double %12, double 0.000000e+00
%16 = insertelement <2 x double> %x0, double %15, i64 0
%res2 = fadd <2 x double> %8, %16
ret <2 x double> %res2
}

declare float @llvm.fma.f32(float, float, float) #1
declare float @llvm.x86.avx512.vfmadd.f32(float, float, float, i32) #0

define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vfmadd213ss {{.*#+}} xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vfmadd213ss {{.*#+}} xmm0 {%k1} {z} = (xmm1 * xmm0) + xmm2
; X86-NEXT: retl
%1 = extractelement <4 x float> %x0, i64 0
%2 = extractelement <4 x float> %x1, i64 0
%3 = extractelement <4 x float> %x2, i64 0
%4 = call float @llvm.fma.f32(float %1, float %2, float %3)
%5 = bitcast i8 %x3 to <8 x i1>
%6 = extractelement <8 x i1> %5, i64 0
%7 = select i1 %6, float %4, float 0.000000e+00
%8 = insertelement <4 x float> %x0, float %7, i64 0
%9 = extractelement <4 x float> %x0, i64 0
%10 = extractelement <4 x float> %x1, i64 0
%11 = extractelement <4 x float> %x2, i64 0
%12 = call float @llvm.x86.avx512.vfmadd.f32(float %9, float %10, float %11, i32 11)
%13 = bitcast i8 %x3 to <8 x i1>
%14 = extractelement <8 x i1> %13, i64 0
%15 = select i1 %14, float %12, float 0.000000e+00
%16 = insertelement <4 x float> %x0, float %15, i64 0
%res2 = fadd <4 x float> %8, %16
ret <4 x float> %8
}

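; The mask3 forms write the result into the third (addend) operand, so a false
; mask bit keeps the old value of %x2 rather than %x0.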
define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
; X64: # %bb.0:
; X64-NEXT: vmovapd %xmm2, %xmm3
; X64-NEXT: vfmadd231sd {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %xmm2, %xmm4
; X64-NEXT: vfmadd231sd {{.*#+}} xmm4 {%k1} = (xmm0 * xmm1) + xmm4
; X64-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X64-NEXT: vmovapd %xmm2, %xmm4
; X64-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm4
; X64-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddpd %xmm2, %xmm4, %xmm0
; X64-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: vmovapd %xmm2, %xmm3
; X86-NEXT: vfmadd231sd {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm2, %xmm4
; X86-NEXT: vfmadd231sd {{.*#+}} xmm4 {%k1} = (xmm0 * xmm1) + xmm4
; X86-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X86-NEXT: vmovapd %xmm2, %xmm4
; X86-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm4
; X86-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddpd %xmm2, %xmm4, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = extractelement <2 x double> %x0, i64 0
%2 = extractelement <2 x double> %x1, i64 0
%3 = extractelement <2 x double> %x2, i64 0
%4 = call double @llvm.fma.f64(double %1, double %2, double %3)
%5 = insertelement <2 x double> %x2, double %4, i64 0
%6 = extractelement <2 x double> %x0, i64 0
%7 = extractelement <2 x double> %x1, i64 0
%8 = extractelement <2 x double> %x2, i64 0
%9 = call double @llvm.fma.f64(double %6, double %7, double %8)
%10 = bitcast i8 %x3 to <8 x i1>
%11 = extractelement <8 x i1> %10, i64 0
%12 = select i1 %11, double %9, double %8
%13 = insertelement <2 x double> %x2, double %12, i64 0
%14 = extractelement <2 x double> %x0, i64 0
%15 = extractelement <2 x double> %x1, i64 0
%16 = extractelement <2 x double> %x2, i64 0
%17 = call double @llvm.x86.avx512.vfmadd.f64(double %14, double %15, double %16, i32 11)
%18 = insertelement <2 x double> %x2, double %17, i64 0
%19 = extractelement <2 x double> %x0, i64 0
%20 = extractelement <2 x double> %x1, i64 0
%21 = extractelement <2 x double> %x2, i64 0
%22 = call double @llvm.x86.avx512.vfmadd.f64(double %19, double %20, double %21, i32 11)
%23 = bitcast i8 %x3 to <8 x i1>
%24 = extractelement <8 x i1> %23, i64 0
%25 = select i1 %24, double %22, double %21
%26 = insertelement <2 x double> %x2, double %25, i64 0
%res4 = fadd <2 x double> %5, %13
%res5 = fadd <2 x double> %18, %26
%res6 = fadd <2 x double> %res4, %res5
ret <2 x double> %res6
}

define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm2, %xmm3
; X64-NEXT: vfmadd231ss {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %xmm2, %xmm4
; X64-NEXT: vfmadd231ss {{.*#+}} xmm4 {%k1} = (xmm0 * xmm1) + xmm4
; X64-NEXT: vaddps %xmm4, %xmm3, %xmm3
; X64-NEXT: vmovaps %xmm2, %xmm4
; X64-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm4
; X64-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddps %xmm2, %xmm4, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: vmovaps %xmm2, %xmm3
; X86-NEXT: vfmadd231ss {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovaps %xmm2, %xmm4
; X86-NEXT: vfmadd231ss {{.*#+}} xmm4 {%k1} = (xmm0 * xmm1) + xmm4
; X86-NEXT: vaddps %xmm4, %xmm3, %xmm3
; X86-NEXT: vmovaps %xmm2, %xmm4
; X86-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm4
; X86-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddps %xmm2, %xmm4, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = extractelement <4 x float> %x0, i64 0
%2 = extractelement <4 x float> %x1, i64 0
%3 = extractelement <4 x float> %x2, i64 0
%4 = call float @llvm.fma.f32(float %1, float %2, float %3)
%5 = insertelement <4 x float> %x2, float %4, i64 0
%6 = extractelement <4 x float> %x0, i64 0
%7 = extractelement <4 x float> %x1, i64 0
%8 = extractelement <4 x float> %x2, i64 0
%9 = call float @llvm.fma.f32(float %6, float %7, float %8)
%10 = bitcast i8 %x3 to <8 x i1>
%11 = extractelement <8 x i1> %10, i64 0
%12 = select i1 %11, float %9, float %8
%13 = insertelement <4 x float> %x2, float %12, i64 0
%14 = extractelement <4 x float> %x0, i64 0
%15 = extractelement <4 x float> %x1, i64 0
%16 = extractelement <4 x float> %x2, i64 0
%17 = call float @llvm.x86.avx512.vfmadd.f32(float %14, float %15, float %16, i32 11)
%18 = insertelement <4 x float> %x2, float %17, i64 0
%19 = extractelement <4 x float> %x0, i64 0
%20 = extractelement <4 x float> %x1, i64 0
%21 = extractelement <4 x float> %x2, i64 0
%22 = call float @llvm.x86.avx512.vfmadd.f32(float %19, float %20, float %21, i32 11)
%23 = bitcast i8 %x3 to <8 x i1>
%24 = extractelement <8 x i1> %23, i64 0
%25 = select i1 %24, float %22, float %21
%26 = insertelement <4 x float> %x2, float %25, i64 0
%res4 = fadd <4 x float> %5, %13
%res5 = fadd <4 x float> %18, %26
%res6 = fadd <4 x float> %res4, %res5
ret <4 x float> %res6
}

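; The *_memfold tests check that a scalar load feeding the FMA is folded into
; the instruction's memory operand where profitable (visible as "mem" in the
; maskz checks) instead of requiring a separate scalar move.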
define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
; X64-LABEL: fmadd_ss_mask_memfold:
; X64: # %bb.0:
; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
; X64-NEXT: kmovw %edx, %k1
; X64-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; X64-NEXT: vmovss %xmm0, (%rdi)
; X64-NEXT: retq
;
; X86-LABEL: fmadd_ss_mask_memfold:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT: vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; X86-NEXT: vmovss %xmm0, (%edx)
; X86-NEXT: retl
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
%av2 = insertelement <4 x float> %av1, float 0.000000e+00, i32 2
%av = insertelement <4 x float> %av2, float 0.000000e+00, i32 3

%b.val = load float, float* %b
%bv0 = insertelement <4 x float> undef, float %b.val, i32 0
%bv1 = insertelement <4 x float> %bv0, float 0.000000e+00, i32 1
%bv2 = insertelement <4 x float> %bv1, float 0.000000e+00, i32 2
%bv = insertelement <4 x float> %bv2, float 0.000000e+00, i32 3
%1 = extractelement <4 x float> %av, i64 0
%2 = extractelement <4 x float> %bv, i64 0
%3 = extractelement <4 x float> %av, i64 0
%4 = call float @llvm.fma.f32(float %1, float %2, float %3)
%5 = bitcast i8 %c to <8 x i1>
%6 = extractelement <8 x i1> %5, i64 0
%7 = select i1 %6, float %4, float %1
%8 = insertelement <4 x float> %av, float %7, i64 0
%sr = extractelement <4 x float> %8, i32 0
store float %sr, float* %a
ret void
}

define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
; X64-LABEL: fmadd_ss_maskz_memfold:
; X64: # %bb.0:
; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vfmadd231ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; X64-NEXT: kmovw %edx, %k1
; X64-NEXT: vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: vmovss %xmm0, (%rdi)
; X64-NEXT: retq
;
; X86-LABEL: fmadd_ss_maskz_memfold:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: vfmadd231ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: vmovss %xmm0, (%edx)
; X86-NEXT: retl
%a.val = load float, float* %a
%av0 = insertelement <4 x float> undef, float %a.val, i32 0
%av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
%av2 = insertelement <4 x float> %av1, float 0.000000e+00, i32 2
%av = insertelement <4 x float> %av2, float 0.000000e+00, i32 3

%b.val = load float, float* %b
%bv0 = insertelement <4 x float> undef, float %b.val, i32 0
%bv1 = insertelement <4 x float> %bv0, float 0.000000e+00, i32 1
%bv2 = insertelement <4 x float> %bv1, float 0.000000e+00, i32 2
%bv = insertelement <4 x float> %bv2, float 0.000000e+00, i32 3
%1 = extractelement <4 x float> %av, i64 0
%2 = extractelement <4 x float> %bv, i64 0
%3 = extractelement <4 x float> %av, i64 0
%4 = call float @llvm.fma.f32(float %1, float %2, float %3)
%5 = bitcast i8 %c to <8 x i1>
%6 = extractelement <8 x i1> %5, i64 0
%7 = select i1 %6, float %4, float 0.000000e+00
%8 = insertelement <4 x float> %av, float %7, i64 0
%sr = extractelement <4 x float> %8, i32 0
store float %sr, float* %a
ret void
}

define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
; X64-LABEL: fmadd_sd_mask_memfold:
; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X64-NEXT: vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
; X64-NEXT: kmovw %edx, %k1
; X64-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; X64-NEXT: vmovsd %xmm0, (%rdi)
; X64-NEXT: retq
;
; X86-LABEL: fmadd_sd_mask_memfold:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; X86-NEXT: vmovsd %xmm0, (%edx)
; X86-NEXT: retl
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1

%b.val = load double, double* %b
%bv0 = insertelement <2 x double> undef, double %b.val, i32 0
%bv = insertelement <2 x double> %bv0, double 0.000000e+00, i32 1
%1 = extractelement <2 x double> %av, i64 0
%2 = extractelement <2 x double> %bv, i64 0
%3 = extractelement <2 x double> %av, i64 0
%4 = call double @llvm.fma.f64(double %1, double %2, double %3)
%5 = bitcast i8 %c to <8 x i1>
%6 = extractelement <8 x i1> %5, i64 0
%7 = select i1 %6, double %4, double %1
%8 = insertelement <2 x double> %av, double %7, i64 0
%sr = extractelement <2 x double> %8, i32 0
store double %sr, double* %a
ret void
}

define void @fmadd_sd_maskz_memfold(double* %a, double* %b, i8 %c) {
; X64-LABEL: fmadd_sd_maskz_memfold:
; X64: # %bb.0:
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT: vfmadd231sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; X64-NEXT: kmovw %edx, %k1
; X64-NEXT: vmovsd %xmm0, %xmm0, %xmm0 {%k1} {z}
; X64-NEXT: vmovsd %xmm0, (%rdi)
; X64-NEXT: retq
;
; X86-LABEL: fmadd_sd_maskz_memfold:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: vfmadd231sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovsd %xmm0, %xmm0, %xmm0 {%k1} {z}
; X86-NEXT: vmovsd %xmm0, (%edx)
; X86-NEXT: retl
%a.val = load double, double* %a
%av0 = insertelement <2 x double> undef, double %a.val, i32 0
%av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1

%b.val = load double, double* %b
%bv0 = insertelement <2 x double> undef, double %b.val, i32 0
%bv = insertelement <2 x double> %bv0, double 0.000000e+00, i32 1
%1 = extractelement <2 x double> %av, i64 0
%2 = extractelement <2 x double> %bv, i64 0
%3 = extractelement <2 x double> %av, i64 0
%4 = call double @llvm.fma.f64(double %1, double %2, double %3)
%5 = bitcast i8 %c to <8 x i1>
%6 = extractelement <8 x i1> %5, i64 0
%7 = select i1 %6, double %4, double 0.000000e+00
%8 = insertelement <2 x double> %av, double %7, i64 0
%sr = extractelement <2 x double> %8, i32 0
store double %sr, double* %a
ret void
}

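; fmsub and fnmsub have no dedicated intrinsic in this file; they are modelled
; as llvm.fma / llvm.x86.avx512.vfmadd with the relevant operands negated via
; an fsub from -0.0, and the backend is expected to fold the negations into
; vfmsub231*/vfnmsub231*.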
define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
; X64: # %bb.0:
; X64-NEXT: vmovapd %xmm2, %xmm3
; X64-NEXT: vfmsub231sd {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %xmm2, %xmm4
; X64-NEXT: vfmsub231sd {{.*#+}} xmm4 {%k1} = (xmm0 * xmm1) - xmm4
; X64-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X64-NEXT: vmovapd %xmm2, %xmm4
; X64-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
; X64-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddpd %xmm2, %xmm4, %xmm0
; X64-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: vmovapd %xmm2, %xmm3
; X86-NEXT: vfmsub231sd {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm2, %xmm4
; X86-NEXT: vfmsub231sd {{.*#+}} xmm4 {%k1} = (xmm0 * xmm1) - xmm4
; X86-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X86-NEXT: vmovapd %xmm2, %xmm4
; X86-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
; X86-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddpd %xmm2, %xmm4, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
%2 = extractelement <2 x double> %x0, i64 0
%3 = extractelement <2 x double> %x1, i64 0
%4 = extractelement <2 x double> %1, i64 0
%5 = call double @llvm.fma.f64(double %2, double %3, double %4)
%6 = extractelement <2 x double> %x2, i64 0
%7 = insertelement <2 x double> %x2, double %5, i64 0
%8 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
%9 = extractelement <2 x double> %x0, i64 0
%10 = extractelement <2 x double> %x1, i64 0
%11 = extractelement <2 x double> %8, i64 0
%12 = call double @llvm.fma.f64(double %9, double %10, double %11)
%13 = extractelement <2 x double> %x2, i64 0
%14 = bitcast i8 %x3 to <8 x i1>
%15 = extractelement <8 x i1> %14, i64 0
%16 = select i1 %15, double %12, double %13
%17 = insertelement <2 x double> %x2, double %16, i64 0
%18 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
%19 = extractelement <2 x double> %x0, i64 0
%20 = extractelement <2 x double> %x1, i64 0
%21 = extractelement <2 x double> %18, i64 0
%22 = call double @llvm.x86.avx512.vfmadd.f64(double %19, double %20, double %21, i32 11)
%23 = extractelement <2 x double> %x2, i64 0
%24 = insertelement <2 x double> %x2, double %22, i64 0
%25 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
%26 = extractelement <2 x double> %x0, i64 0
%27 = extractelement <2 x double> %x1, i64 0
%28 = extractelement <2 x double> %25, i64 0
%29 = call double @llvm.x86.avx512.vfmadd.f64(double %26, double %27, double %28, i32 11)
%30 = extractelement <2 x double> %x2, i64 0
%31 = bitcast i8 %x3 to <8 x i1>
%32 = extractelement <8 x i1> %31, i64 0
%33 = select i1 %32, double %29, double %30
%34 = insertelement <2 x double> %x2, double %33, i64 0
%res4 = fadd <2 x double> %7, %17
%res5 = fadd <2 x double> %24, %34
%res6 = fadd <2 x double> %res4, %res5
ret <2 x double> %res6
}

define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm2, %xmm3
; X64-NEXT: vfmsub231ss {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %xmm2, %xmm4
; X64-NEXT: vfmsub231ss {{.*#+}} xmm4 {%k1} = (xmm0 * xmm1) - xmm4
; X64-NEXT: vaddps %xmm4, %xmm3, %xmm3
; X64-NEXT: vmovaps %xmm2, %xmm4
; X64-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
; X64-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddps %xmm2, %xmm4, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: vmovaps %xmm2, %xmm3
; X86-NEXT: vfmsub231ss {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovaps %xmm2, %xmm4
; X86-NEXT: vfmsub231ss {{.*#+}} xmm4 {%k1} = (xmm0 * xmm1) - xmm4
; X86-NEXT: vaddps %xmm4, %xmm3, %xmm3
; X86-NEXT: vmovaps %xmm2, %xmm4
; X86-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
; X86-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddps %xmm2, %xmm4, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
%2 = extractelement <4 x float> %x0, i64 0
%3 = extractelement <4 x float> %x1, i64 0
%4 = extractelement <4 x float> %1, i64 0
%5 = call float @llvm.fma.f32(float %2, float %3, float %4)
%6 = extractelement <4 x float> %x2, i64 0
%7 = insertelement <4 x float> %x2, float %5, i64 0
%8 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
%9 = extractelement <4 x float> %x0, i64 0
%10 = extractelement <4 x float> %x1, i64 0
%11 = extractelement <4 x float> %8, i64 0
%12 = call float @llvm.fma.f32(float %9, float %10, float %11)
%13 = extractelement <4 x float> %x2, i64 0
%14 = bitcast i8 %x3 to <8 x i1>
%15 = extractelement <8 x i1> %14, i64 0
%16 = select i1 %15, float %12, float %13
%17 = insertelement <4 x float> %x2, float %16, i64 0
%18 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
%19 = extractelement <4 x float> %x0, i64 0
%20 = extractelement <4 x float> %x1, i64 0
%21 = extractelement <4 x float> %18, i64 0
%22 = call float @llvm.x86.avx512.vfmadd.f32(float %19, float %20, float %21, i32 11)
%23 = extractelement <4 x float> %x2, i64 0
%24 = insertelement <4 x float> %x2, float %22, i64 0
%25 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
%26 = extractelement <4 x float> %x0, i64 0
%27 = extractelement <4 x float> %x1, i64 0
%28 = extractelement <4 x float> %25, i64 0
%29 = call float @llvm.x86.avx512.vfmadd.f32(float %26, float %27, float %28, i32 11)
%30 = extractelement <4 x float> %x2, i64 0
%31 = bitcast i8 %x3 to <8 x i1>
%32 = extractelement <8 x i1> %31, i64 0
%33 = select i1 %32, float %29, float %30
%34 = insertelement <4 x float> %x2, float %33, i64 0
%res4 = fadd <4 x float> %7, %17
%res5 = fadd <4 x float> %24, %34
%res6 = fadd <4 x float> %res4, %res5
ret <4 x float> %res6
}

define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
; X64: # %bb.0:
; X64-NEXT: vmovapd %xmm2, %xmm3
; X64-NEXT: vfnmsub231sd {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovapd %xmm2, %xmm4
; X64-NEXT: vfnmsub231sd {{.*#+}} xmm4 {%k1} = -(xmm0 * xmm1) - xmm4
; X64-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X64-NEXT: vmovapd %xmm2, %xmm4
; X64-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
; X64-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddpd %xmm2, %xmm4, %xmm0
; X64-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: vmovapd %xmm2, %xmm3
; X86-NEXT: vfnmsub231sd {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovapd %xmm2, %xmm4
; X86-NEXT: vfnmsub231sd {{.*#+}} xmm4 {%k1} = -(xmm0 * xmm1) - xmm4
; X86-NEXT: vaddpd %xmm4, %xmm3, %xmm3
; X86-NEXT: vmovapd %xmm2, %xmm4
; X86-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
; X86-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddpd %xmm2, %xmm4, %xmm0
; X86-NEXT: vaddpd %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x0
%2 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
%3 = extractelement <2 x double> %1, i64 0
%4 = extractelement <2 x double> %x1, i64 0
%5 = extractelement <2 x double> %2, i64 0
%6 = call double @llvm.fma.f64(double %3, double %4, double %5)
%7 = extractelement <2 x double> %x2, i64 0
%8 = insertelement <2 x double> %x2, double %6, i64 0
%9 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x0
%10 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
%11 = extractelement <2 x double> %9, i64 0
%12 = extractelement <2 x double> %x1, i64 0
%13 = extractelement <2 x double> %10, i64 0
%14 = call double @llvm.fma.f64(double %11, double %12, double %13)
%15 = extractelement <2 x double> %x2, i64 0
%16 = bitcast i8 %x3 to <8 x i1>
%17 = extractelement <8 x i1> %16, i64 0
%18 = select i1 %17, double %14, double %15
%19 = insertelement <2 x double> %x2, double %18, i64 0
%20 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x0
%21 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
%22 = extractelement <2 x double> %20, i64 0
%23 = extractelement <2 x double> %x1, i64 0
%24 = extractelement <2 x double> %21, i64 0
%25 = call double @llvm.x86.avx512.vfmadd.f64(double %22, double %23, double %24, i32 11)
%26 = extractelement <2 x double> %x2, i64 0
%27 = insertelement <2 x double> %x2, double %25, i64 0
%28 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x0
%29 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
%30 = extractelement <2 x double> %28, i64 0
%31 = extractelement <2 x double> %x1, i64 0
%32 = extractelement <2 x double> %29, i64 0
%33 = call double @llvm.x86.avx512.vfmadd.f64(double %30, double %31, double %32, i32 11)
%34 = extractelement <2 x double> %x2, i64 0
%35 = bitcast i8 %x3 to <8 x i1>
%36 = extractelement <8 x i1> %35, i64 0
%37 = select i1 %36, double %33, double %34
%38 = insertelement <2 x double> %x2, double %37, i64 0
%res4 = fadd <2 x double> %8, %19
%res5 = fadd <2 x double> %27, %38
%res6 = fadd <2 x double> %res4, %res5
ret <2 x double> %res6
}

define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
; X64: # %bb.0:
; X64-NEXT: vmovaps %xmm2, %xmm3
; X64-NEXT: vfnmsub231ss {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovaps %xmm2, %xmm4
; X64-NEXT: vfnmsub231ss {{.*#+}} xmm4 {%k1} = -(xmm0 * xmm1) - xmm4
; X64-NEXT: vaddps %xmm4, %xmm3, %xmm3
; X64-NEXT: vmovaps %xmm2, %xmm4
; X64-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
; X64-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X64-NEXT: vaddps %xmm2, %xmm4, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: vmovaps %xmm2, %xmm3
; X86-NEXT: vfnmsub231ss {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: vmovaps %xmm2, %xmm4
; X86-NEXT: vfnmsub231ss {{.*#+}} xmm4 {%k1} = -(xmm0 * xmm1) - xmm4
; X86-NEXT: vaddps %xmm4, %xmm3, %xmm3
; X86-NEXT: vmovaps %xmm2, %xmm4
; X86-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
; X86-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; X86-NEXT: vaddps %xmm2, %xmm4, %xmm0
; X86-NEXT: vaddps %xmm0, %xmm3, %xmm0
; X86-NEXT: retl
%1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x0
%2 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
%3 = extractelement <4 x float> %1, i64 0
%4 = extractelement <4 x float> %x1, i64 0
%5 = extractelement <4 x float> %2, i64 0
%6 = call float @llvm.fma.f32(float %3, float %4, float %5)
%7 = extractelement <4 x float> %x2, i64 0
%8 = insertelement <4 x float> %x2, float %6, i64 0
%9 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x0
%10 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
%11 = extractelement <4 x float> %9, i64 0
%12 = extractelement <4 x float> %x1, i64 0
%13 = extractelement <4 x float> %10, i64 0
%14 = call float @llvm.fma.f32(float %11, float %12, float %13)
%15 = extractelement <4 x float> %x2, i64 0
%16 = bitcast i8 %x3 to <8 x i1>
%17 = extractelement <8 x i1> %16, i64 0
%18 = select i1 %17, float %14, float %15
%19 = insertelement <4 x float> %x2, float %18, i64 0
%20 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x0
%21 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
%22 = extractelement <4 x float> %20, i64 0
%23 = extractelement <4 x float> %x1, i64 0
%24 = extractelement <4 x float> %21, i64 0
%25 = call float @llvm.x86.avx512.vfmadd.f32(float %22, float %23, float %24, i32 11)
%26 = extractelement <4 x float> %x2, i64 0
%27 = insertelement <4 x float> %x2, float %25, i64 0
%28 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x0
%29 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
%30 = extractelement <4 x float> %28, i64 0
%31 = extractelement <4 x float> %x1, i64 0
%32 = extractelement <4 x float> %29, i64 0
%33 = call float @llvm.x86.avx512.vfmadd.f32(float %30, float %31, float %32, i32 11)
%34 = extractelement <4 x float> %x2, i64 0
%35 = bitcast i8 %x3 to <8 x i1>
%36 = extractelement <8 x i1> %35, i64 0
%37 = select i1 %36, float %33, float %34
%38 = insertelement <4 x float> %x2, float %37, i64 0
%res4 = fadd <4 x float> %8, %19
%res5 = fadd <4 x float> %27, %38
%res6 = fadd <4 x float> %res4, %res5
ret <4 x float> %res6
}

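; The _rm variants take one multiplicand from memory (%ptr_b) to check load
; folding in the masked scalar FMA patterns.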
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, float *%ptr_b ,i8 %x3,i32 %x4) {
; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vfmadd231ss {{.*#+}} xmm1 {%k1} = (xmm0 * mem) + xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: kmovw %ecx, %k1
; X86-NEXT: vfmadd231ss {{.*#+}} xmm1 {%k1} = (xmm0 * mem) + xmm1
; X86-NEXT: vmovaps %xmm1, %xmm0
; X86-NEXT: retl
%q = load float, float* %ptr_b
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%1 = extractelement <4 x float> %x0, i64 0
%2 = extractelement <4 x float> %vecinit.i, i64 0
%3 = extractelement <4 x float> %x1, i64 0
%4 = call float @llvm.fma.f32(float %1, float %2, float %3)
%5 = bitcast i8 %x3 to <8 x i1>
%6 = extractelement <8 x i1> %5, i64 0
%7 = select i1 %6, float %4, float %3
%8 = insertelement <4 x float> %x1, float %7, i64 0
ret <4 x float> %8
}

define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
; X64: # %bb.0:
; X64-NEXT: kmovw %esi, %k1
; X64-NEXT: vfmadd132ss {{.*#+}} xmm0 {%k1} = (xmm0 * mem) + xmm1
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: kmovw %ecx, %k1
; X86-NEXT: vfmadd132ss {{.*#+}} xmm0 {%k1} = (xmm0 * mem) + xmm1
; X86-NEXT: retl
%q = load float, float* %ptr_b
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%1 = extractelement <4 x float> %x0, i64 0
%2 = extractelement <4 x float> %vecinit.i, i64 0
%3 = extractelement <4 x float> %x1, i64 0
%4 = call float @llvm.fma.f32(float %1, float %2, float %3)
%5 = bitcast i8 %x3 to <8 x i1>
%6 = extractelement <8 x i1> %5, i64 0
%7 = select i1 %6, float %4, float %1
%8 = insertelement <4 x float> %x0, float %7, i64 0
ret <4 x float> %8
}

define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss_rm:
; CHECK: # %bb.0:
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; CHECK-NEXT: ret{{[l|q]}}
%q = load float, float* %ptr_b
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%1 = extractelement <4 x float> %x0, i64 0
%2 = extractelement <4 x float> %x1, i64 0
%3 = extractelement <4 x float> %vecinit.i, i64 0
%4 = call float @llvm.fma.f32(float %1, float %2, float %3)
%5 = select i1 false, float %4, float 0.000000e+00
%6 = insertelement <4 x float> %x0, float %5, i64 0
ret <4 x float> %6
}

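; Vector shift tests: psll.d/psll.q shift every element by the count held in
; the low quadword of an XMM register, while pslli.* shift by an immediate;
; each comes in plain, merge-masked, and zero-masked flavours.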
define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_d_512:
; CHECK: # %bb.0:
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm0
; CHECK-NEXT: ret{{[l|q]}}
%res = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
ret <16 x i32> %res
}
define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_psll_d_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpslld %xmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT: vmovdqa64 %zmm2, %zmm0
; X64-NEXT: retq
;
; X86-LABEL: test_x86_avx512_mask_psll_d_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpslld %xmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT: vmovdqa64 %zmm2, %zmm0
; X86-NEXT: retl
%res = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
ret <16 x i32> %res2
}
define <16 x i32> @test_x86_avx512_maskz_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psll_d_512:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: retq
;
; X86-LABEL: test_x86_avx512_maskz_psll_d_512:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: retl
%res = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
ret <16 x i32> %res2
}
declare <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32>, <4 x i32>) nounwind readnone

define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) {
|
|
; CHECK-LABEL: test_x86_avx512_psll_q_512:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm0
|
|
; CHECK-NEXT: ret{{[l|q]}}
|
|
%res = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
|
|
ret <8 x i64> %res
|
|
}
|
|
define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
|
|
; X64-LABEL: test_x86_avx512_mask_psll_q_512:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vpsllq %xmm1, %zmm0, %zmm2 {%k1}
|
|
; X64-NEXT: vmovdqa64 %zmm2, %zmm0
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_x86_avx512_mask_psll_q_512:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vpsllq %xmm1, %zmm0, %zmm2 {%k1}
|
|
; X86-NEXT: vmovdqa64 %zmm2, %zmm0
|
|
; X86-NEXT: retl
|
|
%res = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
|
|
%mask.cast = bitcast i8 %mask to <8 x i1>
|
|
%res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
|
|
ret <8 x i64> %res2
|
|
}
|
|
define <8 x i64> @test_x86_avx512_maskz_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
|
|
; X64-LABEL: test_x86_avx512_maskz_psll_q_512:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: kmovw %edi, %k1
|
|
; X64-NEXT: vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X64-NEXT: retq
|
|
;
|
|
; X86-LABEL: test_x86_avx512_maskz_psll_q_512:
|
|
; X86: # %bb.0:
|
|
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
|
|
; X86-NEXT: kmovw %eax, %k1
|
|
; X86-NEXT: vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
|
|
; X86-NEXT: retl
|
|
%res = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
|
|
%mask.cast = bitcast i8 %mask to <8 x i1>
|
|
%res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
|
|
ret <8 x i64> %res2
|
|
}
|
|
declare <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64>, <2 x i64>) nounwind readnone
|
|
|
|
|
|
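
; The *i (immediate) shift tests below pass the count as an i32 constant to
; the intrinsic, so the expected asm uses the imm8 encoding (e.g.
; `vpslld $7, %zmm0, ...`) instead of taking the count in an xmm register.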
define <16 x i32> @test_x86_avx512_pslli_d_512(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_d_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_pslli_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpslld $7, %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_pslli_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpslld $7, %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_maskz_pslli_d_512(<16 x i32> %a0, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_pslli_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpslld $7, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_pslli_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpslld $7, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32>, i32) nounwind readnone


define <8 x i64> @test_x86_avx512_pslli_q_512(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_q_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; X64-LABEL: test_x86_avx512_mask_pslli_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsllq $7, %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_pslli_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsllq $7, %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_maskz_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; X64-LABEL: test_x86_avx512_maskz_pslli_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsllq $7, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_pslli_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsllq $7, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
  ret <8 x i64> %res2
}

declare <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64>, i32) nounwind readnone
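
; vpsraq (arithmetic right shift of 64-bit elements) has no SSE2/AVX2
; counterpart; it was introduced with AVX512F, which is why the psra.q tests
; appear here alongside the pre-existing d-element forms.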
define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_q_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
; X64-LABEL: test_x86_avx512_mask_psra_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsraq %xmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psra_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsraq %xmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_maskz_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psra_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psra_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
  ret <8 x i64> %res2
}

declare <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64>, <2 x i64>) nounwind readnone


define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_d_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_psra_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrad %xmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psra_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrad %xmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_maskz_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psra_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psra_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32>, <4 x i32>) nounwind readnone


define <8 x i64> @test_x86_avx512_psrai_q_512(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_q_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrai_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsraq $7, %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrai_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsraq $7, %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_maskz_psrai_q_512(<8 x i64> %a0, i8 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrai_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsraq $7, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrai_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsraq $7, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
  ret <8 x i64> %res2
}

declare <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64>, i32) nounwind readnone


define <16 x i32> @test_x86_avx512_psrai_d_512(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_d_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrai_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrad $7, %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrai_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrad $7, %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_maskz_psrai_d_512(<16 x i32> %a0, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrai_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrad $7, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrai_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrad $7, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32>, i32) nounwind readnone


define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_d_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrl_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrld %xmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrl_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrld %xmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_maskz_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrl_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrl_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32>, <4 x i32>) nounwind readnone


define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_q_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrl_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrl_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_maskz_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrl_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrl_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
  ret <8 x i64> %res2
}

declare <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64>, <2 x i64>) nounwind readnone


define <16 x i32> @test_x86_avx512_psrli_d_512(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_d_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrli_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrld $7, %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrli_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrld $7, %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_maskz_psrli_d_512(<16 x i32> %a0, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrli_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrld $7, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrli_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrld $7, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32>, i32) nounwind readnone


define <8 x i64> @test_x86_avx512_psrli_q_512(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_q_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrli_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrlq $7, %zmm0, %zmm1 {%k1}
; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrli_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsrlq $7, %zmm0, %zmm1 {%k1}
; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_maskz_psrli_q_512(<8 x i64> %a0, i8 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrli_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrli_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
  ret <8 x i64> %res2
}

declare <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64>, i32) nounwind readnone
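
; The variable-shift tests below shift each element by its own count. For
; the logical forms (vpsllv*/vpsrlv*) a count of at least the element width
; zeroes that lane, so the *_const tests feed in deliberately out-of-range
; (33, 34, ...) and negative counts; the CHECK lines show the shifts still
; being performed at run time from constant-pool operands rather than being
; constant-folded away.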
define <16 x i32> @test_x86_avx512_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_d_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_psllv_d_512_const() {
; X64-LABEL: test_x86_avx512_psllv_d_512_const:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
; X64-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
; X64-NEXT:    vpsllvd {{.*}}(%rip), %zmm1, %zmm1
; X64-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_psllv_d_512_const:
; X86:       # %bb.0:
; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
; X86-NEXT:    vpsllvd {{\.LCPI.*}}, %zmm0, %zmm0
; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
; X86-NEXT:    vpsllvd {{\.LCPI.*}}, %zmm1, %zmm1
; X86-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; X86-NEXT:    retl
  %res0 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1, i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
  %res1 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 -1>)
  %res2 = add <16 x i32> %res0, %res1
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_psllv_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psllv_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %a2
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_maskz_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psllv_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psllv_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32>, <16 x i32>) nounwind readnone

define <8 x i64> @test_x86_avx512_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_q_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_psllv_q_512_const() {
; X64-LABEL: test_x86_avx512_psllv_q_512_const:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,18446744073709551615,3,7,18446744073709551615,0]
; X64-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,18446744073709551615]
; X64-NEXT:    vpsllvq {{.*}}(%rip), %zmm1, %zmm1
; X64-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_psllv_q_512_const:
; X86:       # %bb.0:
; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0]
; X86-NEXT:    vpsllvq {{\.LCPI.*}}, %zmm0, %zmm0
; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295]
; X86-NEXT:    vpsllvq {{\.LCPI.*}}, %zmm1, %zmm1
; X86-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; X86-NEXT:    retl
  %res0 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1, i64 2, i64 0, i64 34, i64 -2>)
  %res1 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 -1>)
  %res2 = add <8 x i64> %res0, %res1
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; X64-LABEL: test_x86_avx512_mask_psllv_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psllv_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %a2
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_maskz_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psllv_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psllv_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
  ret <8 x i64> %res2
}

declare <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64>, <8 x i64>) nounwind readnone
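
; For the arithmetic variable shifts (vpsravd/vpsravq), an out-of-range
; count fills the lane with copies of the sign bit rather than zeroing it,
; so psrav gets its own set of tests instead of sharing the psrlv ones.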
define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_d_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrav_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsravd %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrav_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsravd %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %a2
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_maskz_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrav_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrav_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32>, <16 x i32>) nounwind readnone

define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_q_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrav_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsravq %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrav_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsravq %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %a2
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_maskz_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrav_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrav_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
  ret <8 x i64> %res2
}

declare <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64>, <8 x i64>) nounwind readnone

define <16 x i32> @test_x86_avx512_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_d_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_psrlv_d_512_const() {
; X64-LABEL: test_x86_avx512_psrlv_d_512_const:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
; X64-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
; X64-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
; X64-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_psrlv_d_512_const:
; X86:       # %bb.0:
; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
; X86-NEXT:    vpsrlvd {{\.LCPI.*}}, %zmm0, %zmm0
; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
; X86-NEXT:    vpsrlvd {{\.LCPI.*}}, %zmm1, %zmm1
; X86-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; X86-NEXT:    retl
  %res0 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1, i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
  %res1 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 -1>)
  %res2 = add <16 x i32> %res0, %res1
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrlv_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrlv_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %a2
  ret <16 x i32> %res2
}

define <16 x i32> @test_x86_avx512_maskz_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrlv_d_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrlv_d_512:
; X86:       # %bb.0:
; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
  %mask.cast = bitcast i16 %mask to <16 x i1>
  %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32>, <16 x i32>) nounwind readnone

define <8 x i64> @test_x86_avx512_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_q_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_psrlv_q_512_const() {
; X64-LABEL: test_x86_avx512_psrlv_q_512_const:
; X64:       # %bb.0:
; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,18446744073709551615,3,7,18446744073709551615,0]
; X64-NEXT:    vpsrlvq {{.*}}(%rip), %zmm0, %zmm0
; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,18446744073709551615]
; X64-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
; X64-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_psrlv_q_512_const:
; X86:       # %bb.0:
; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0]
; X86-NEXT:    vpsrlvq {{\.LCPI.*}}, %zmm0, %zmm0
; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295]
; X86-NEXT:    vpsrlvq {{\.LCPI.*}}, %zmm1, %zmm1
; X86-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; X86-NEXT:    retl
  %res0 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1, i64 2, i64 0, i64 34, i64 -2>)
  %res1 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 -1>)
  %res2 = add <8 x i64> %res0, %res1
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; X64-LABEL: test_x86_avx512_mask_psrlv_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_mask_psrlv_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %a2
  ret <8 x i64> %res2
}

define <8 x i64> @test_x86_avx512_maskz_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; X64-LABEL: test_x86_avx512_maskz_psrlv_q_512:
; X64:       # %bb.0:
; X64-NEXT:    kmovw %edi, %k1
; X64-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT:    retq
;
; X86-LABEL: test_x86_avx512_maskz_psrlv_q_512:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT:    retl
  %res = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
  ret <8 x i64> %res2
}

declare <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64>, <8 x i64>) nounwind readnone
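
; The bad_mask_transition tests cover a mask-width transition: each 8-wide
; compare result is bitcast to i8 and zero-extended to i16 before being used
; as a 16-bit mask. Because the zext defines the top eight mask bits as
; zero, the compare results must round-trip through GPRs (kmovw + movzbl)
; rather than staying in k-registers; the first test then recombines two
; such masks with kunpckbw before the masked blend.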
define <16 x float> @bad_mask_transition(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d, <16 x float> %e, <16 x float> %f) {
; X64-LABEL: bad_mask_transition:
; X64:       # %bb.0: # %entry
; X64-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
; X64-NEXT:    kmovw %k0, %eax
; X64-NEXT:    vcmplt_oqpd %zmm3, %zmm2, %k0
; X64-NEXT:    kmovw %k0, %ecx
; X64-NEXT:    movzbl %al, %eax
; X64-NEXT:    movzbl %cl, %ecx
; X64-NEXT:    kmovw %eax, %k0
; X64-NEXT:    kmovw %ecx, %k1
; X64-NEXT:    kunpckbw %k0, %k1, %k1
; X64-NEXT:    vblendmps %zmm5, %zmm4, %zmm0 {%k1}
; X64-NEXT:    retq
;
; X86-LABEL: bad_mask_transition:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vmovaps 72(%ebp), %zmm3
; X86-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
; X86-NEXT:    kmovw %k0, %eax
; X86-NEXT:    vcmplt_oqpd 8(%ebp), %zmm2, %k0
; X86-NEXT:    kmovw %k0, %ecx
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    movzbl %cl, %ecx
; X86-NEXT:    kmovw %eax, %k0
; X86-NEXT:    kmovw %ecx, %k1
; X86-NEXT:    kunpckbw %k0, %k1, %k1
; X86-NEXT:    vmovaps 136(%ebp), %zmm3 {%k1}
; X86-NEXT:    vmovaps %zmm3, %zmm0
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
entry:
  %0 = call <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 17, i32 4)
  %1 = bitcast <8 x i1> %0 to i8
  %2 = call <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double> %c, <8 x double> %d, i32 17, i32 4)
  %3 = bitcast <8 x i1> %2 to i8
  %conv = zext i8 %1 to i16
  %conv2 = zext i8 %3 to i16
  %4 = bitcast i16 %conv to <16 x i1>
  %5 = bitcast i16 %conv2 to <16 x i1>
  %6 = shufflevector <16 x i1> %4, <16 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %7 = shufflevector <16 x i1> %5, <16 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %8 = shufflevector <8 x i1> %6, <8 x i1> %7, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %9 = select <16 x i1> %8, <16 x float> %f, <16 x float> %e
  ret <16 x float> %9
}

define <16 x float> @bad_mask_transition_2(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d, <16 x float> %e, <16 x float> %f) {
; X64-LABEL: bad_mask_transition_2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
; X64-NEXT:    kmovw %k0, %eax
; X64-NEXT:    movzbl %al, %eax
; X64-NEXT:    kmovw %eax, %k1
; X64-NEXT:    vblendmps %zmm5, %zmm4, %zmm0 {%k1}
; X64-NEXT:    retq
;
; X86-LABEL: bad_mask_transition_2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    andl $-64, %esp
; X86-NEXT:    subl $64, %esp
; X86-NEXT:    vmovaps 72(%ebp), %zmm2
; X86-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
; X86-NEXT:    kmovw %k0, %eax
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    kmovw %eax, %k1
; X86-NEXT:    vmovaps 136(%ebp), %zmm2 {%k1}
; X86-NEXT:    vmovaps %zmm2, %zmm0
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
entry:
  %0 = call <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 17, i32 4)
  %1 = bitcast <8 x i1> %0 to i8
  %conv = zext i8 %1 to i16
  %2 = bitcast i16 %conv to <16 x i1>
  %3 = select <16 x i1> %2, <16 x float> %f, <16 x float> %e
  ret <16 x float> %3
}

declare <8 x double> @llvm.x86.avx512.mask.compress.v8f64(<8 x double>, <8 x double>, <8 x i1>)
declare <16 x float> @llvm.x86.avx512.mask.compress.v16f32(<16 x float>, <16 x float>, <16 x i1>)
declare <8 x i64> @llvm.x86.avx512.mask.compress.v8i64(<8 x i64>, <8 x i64>, <8 x i1>)
declare <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32>, <16 x i32>, <16 x i1>)
declare <8 x double> @llvm.x86.avx512.mask.expand.v8f64(<8 x double>, <8 x double>, <8 x i1>)
declare <16 x float> @llvm.x86.avx512.mask.expand.v16f32(<16 x float>, <16 x float>, <16 x i1>)
declare <8 x i64> @llvm.x86.avx512.mask.expand.v8i64(<8 x i64>, <8 x i64>, <8 x i1>)
declare <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32>, <16 x i32>, <16 x i1>)