; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck --check-prefix=AVX %s
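
; These tests check the lowering of constrained FP intrinsics on <1 x>, <2 x>,
; <3 x>, and <4 x> vectors of float/double for a generic SSE target (CHECK)
; and for AVX (AVX). Operations with instruction support (fdiv, fmul, fadd,
; fsub, sqrt) select SSE/AVX instructions; the remaining operations shown here
; (frem, pow, powi, sin) are expanded to per-element libcalls.
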
define <1 x float> @constrained_vector_fdiv_v1f32() {
; CHECK-LABEL: constrained_vector_fdiv_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: divss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vdivss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %div = call <1 x float> @llvm.experimental.constrained.fdiv.v1f32(
           <1 x float> <float 1.000000e+00>,
           <1 x float> <float 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %div
}

define <2 x double> @constrained_vector_fdiv_v2f64() {
; CHECK-LABEL: constrained_vector_fdiv_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; AVX-NEXT: vdivpd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %div = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
           <2 x double> <double 1.000000e+00, double 2.000000e+00>,
           <2 x double> <double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %div
}

define <3 x float> @constrained_vector_fdiv_v3f32() {
; CHECK-LABEL: constrained_vector_fdiv_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: divss %xmm1, %xmm2
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: divss %xmm1, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: divss %xmm1, %xmm3
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vdivss %xmm0, %xmm2, %xmm2
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vdivss %xmm0, %xmm3, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; AVX-NEXT: retq
entry:
  %div = call <3 x float> @llvm.experimental.constrained.fdiv.v3f32(
           <3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
           <3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %div
}

define <3 x double> @constrained_vector_fdiv_v3f64() {
; CHECK-LABEL: constrained_vector_fdiv_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: divsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vdivsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [1.0E+0,2.0E+0]
; AVX-NEXT: vdivpd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %div = call <3 x double> @llvm.experimental.constrained.fdiv.v3f64(
           <3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
           <3 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %div
}

define <4 x double> @constrained_vector_fdiv_v4f64() {
; CHECK-LABEL: constrained_vector_fdiv_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm2 = [1.0E+1,1.0E+1]
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm0
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [3.0E+0,4.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm1
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
; AVX-NEXT: vdivpd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %div = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(
           <4 x double> <double 1.000000e+00, double 2.000000e+00,
                         double 3.000000e+00, double 4.000000e+00>,
           <4 x double> <double 1.000000e+01, double 1.000000e+01,
                         double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %div
}

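; There is no SSE/AVX instruction for frem, so each element is lowered to a
; libcall to fmodf/fmod.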
define <1 x float> @constrained_vector_frem_v1f32() {
; CHECK-LABEL: constrained_vector_frem_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_frem_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fmodf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %rem = call <1 x float> @llvm.experimental.constrained.frem.v1f32(
           <1 x float> <float 1.000000e+00>,
           <1 x float> <float 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %rem
}

define <2 x double> @constrained_vector_frem_v2f64() {
; CHECK-LABEL: constrained_vector_frem_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_frem_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmod
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmod
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %rem = call <2 x double> @llvm.experimental.constrained.frem.v2f64(
           <2 x double> <double 1.000000e+00, double 2.000000e+00>,
           <2 x double> <double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %rem
}

define <3 x float> @constrained_vector_frem_v3f32() {
; CHECK-LABEL: constrained_vector_frem_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_frem_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fmodf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fmodf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fmodf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %rem = call <3 x float> @llvm.experimental.constrained.frem.v3f32(
           <3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
           <3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %rem
}

define <3 x double> @constrained_vector_frem_v3f64() {
; CHECK-LABEL: constrained_vector_frem_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_frem_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmod
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmod
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq fmod
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %rem = call <3 x double> @llvm.experimental.constrained.frem.v3f64(
           <3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
           <3 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %rem
}

define <4 x double> @constrained_vector_frem_v4f64() {
; CHECK-LABEL: constrained_vector_frem_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_frem_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmod
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmod
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmod
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmod
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
  %rem = call <4 x double> @llvm.experimental.constrained.frem.v4f64(
           <4 x double> <double 1.000000e+00, double 2.000000e+00,
                         double 3.000000e+00, double 4.000000e+00>,
           <4 x double> <double 1.000000e+01, double 1.000000e+01,
                         double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %rem
}

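; fmul has direct instruction support: mulss/mulsd/mulpd (and the AVX v-forms).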
define <1 x float> @constrained_vector_fmul_v1f32() {
; CHECK-LABEL: constrained_vector_fmul_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %mul = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(
           <1 x float> <float 0x7FF0000000000000>,
           <1 x float> <float 2.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %mul
}

define <2 x double> @constrained_vector_fmul_v2f64() {
; CHECK-LABEL: constrained_vector_fmul_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; AVX-NEXT: vmulpd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %mul = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
           <2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <2 x double> <double 2.000000e+00, double 3.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %mul
}

define <3 x float> @constrained_vector_fmul_v3f32() {
; CHECK-LABEL: constrained_vector_fmul_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: mulss %xmm1, %xmm2
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: mulss %xmm1, %xmm0
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm1
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm2
; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; AVX-NEXT: retq
entry:
  %mul = call <3 x float> @llvm.experimental.constrained.fmul.v3f32(
           <3 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000,
                        float 0x7FF0000000000000>,
           <3 x float> <float 1.000000e+00, float 1.000000e+01, float 1.000000e+02>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %mul
}

define <3 x double> @constrained_vector_fmul_v3f64() {
; CHECK-LABEL: constrained_vector_fmul_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: mulsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmulsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
; AVX-NEXT: vmulpd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %mul = call <3 x double> @llvm.experimental.constrained.fmul.v3f64(
           <3 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF>,
           <3 x double> <double 1.000000e+00, double 1.000000e+01, double 1.000000e+02>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %mul
}

define <4 x double> @constrained_vector_fmul_v4f64() {
; CHECK-LABEL: constrained_vector_fmul_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
; CHECK-NEXT: mulpd %xmm1, %xmm0
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
; AVX-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %mul = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(
           <4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <4 x double> <double 2.000000e+00, double 3.000000e+00,
                         double 4.000000e+00, double 5.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %mul
}

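; fadd has direct instruction support: addss/addsd (vaddss/vaddsd under AVX).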
define <1 x float> @constrained_vector_fadd_v1f32() {
; CHECK-LABEL: constrained_vector_fadd_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: addss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %add = call <1 x float> @llvm.experimental.constrained.fadd.v1f32(
           <1 x float> <float 0x7FF0000000000000>,
           <1 x float> <float 1.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %add
}

define <2 x double> @constrained_vector_fadd_v2f64() {
; CHECK-LABEL: constrained_vector_fadd_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: addsd %xmm0, %xmm1
; CHECK-NEXT: addsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
entry:
  %add = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
           <2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <2 x double> <double 1.000000e+00, double 1.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %add
}

define <3 x float> @constrained_vector_fadd_v3f32() {
; CHECK-LABEL: constrained_vector_fadd_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: addss %xmm2, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: addss %xmm2, %xmm0
; CHECK-NEXT: addss {{.*}}(%rip), %xmm2
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm2
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
  %add = call <3 x float> @llvm.experimental.constrained.fadd.v3f32(
           <3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
                        float 0xFFFFFFFFE0000000>,
           <3 x float> <float 2.0, float 1.0, float 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %add
}

define <3 x double> @constrained_vector_fadd_v3f64() {
; CHECK-LABEL: constrained_vector_fadd_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm2
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: addsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm2
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %add = call <3 x double> @llvm.experimental.constrained.fadd.v3f64(
           <3 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF>,
           <3 x double> <double 2.0, double 1.0, double 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %add
}

define <4 x double> @constrained_vector_fadd_v4f64() {
; CHECK-LABEL: constrained_vector_fadd_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm2
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm2
; CHECK-NEXT: addsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm2
; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm2
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
           <4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <4 x double> <double 1.000000e+00, double 1.000000e-01,
                         double 2.000000e+00, double 2.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %add
}

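; fsub has direct instruction support: subss/subsd (vsubss/vsubsd under AVX).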
define <1 x float> @constrained_vector_fsub_v1f32() {
; CHECK-LABEL: constrained_vector_fsub_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: subss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vsubss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %sub = call <1 x float> @llvm.experimental.constrained.fsub.v1f32(
           <1 x float> <float 0x7FF0000000000000>,
           <1 x float> <float 1.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %sub
}

define <2 x double> @constrained_vector_fsub_v2f64() {
; CHECK-LABEL: constrained_vector_fsub_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
entry:
  %sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
           <2 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
           <2 x double> <double 1.000000e+00, double 1.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %sub
}

define <3 x float> @constrained_vector_fsub_v3f32() {
; CHECK-LABEL: constrained_vector_fsub_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movaps %xmm1, %xmm2
; CHECK-NEXT: subss %xmm0, %xmm2
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: subss {{.*}}(%rip), %xmm0
; CHECK-NEXT: subss {{.*}}(%rip), %xmm1
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vsubss {{.*}}(%rip), %xmm1, %xmm2
; AVX-NEXT: vsubss {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
  %sub = call <3 x float> @llvm.experimental.constrained.fsub.v3f32(
           <3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
                        float 0xFFFFFFFFE0000000>,
           <3 x float> <float 2.0, float 1.0, float 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %sub
}

define <3 x double> @constrained_vector_fsub_v3f64() {
; CHECK-LABEL: constrained_vector_fsub_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorpd %xmm0, %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: movapd %xmm1, %xmm2
; CHECK-NEXT: subsd %xmm0, %xmm2
; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm1, %xmm2
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %sub = call <3 x double> @llvm.experimental.constrained.fsub.v3f64(
           <3 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF,
                         double 0xFFEFFFFFFFFFFFFF>,
           <3 x double> <double 2.0, double 1.0, double 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %sub
}

define <4 x double> @constrained_vector_fsub_v4f64() {
; CHECK-LABEL: constrained_vector_fsub_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: movapd %xmm1, %xmm2
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm2
; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: movapd %xmm1, %xmm2
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm2
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm2
; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm2
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %sub = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(
           <4 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF,
                         double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
           <4 x double> <double 1.000000e+00, double 1.000000e-01,
                         double 2.000000e+00, double 2.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %sub
}

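; sqrt selects the hardware square-root instructions: sqrtss/sqrtsd/sqrtpd
; (vsqrtss/vsqrtsd/vsqrtpd under AVX).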
define <1 x float> @constrained_vector_sqrt_v1f32() {
; CHECK-LABEL: constrained_vector_sqrt_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm0, %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %sqrt = call <1 x float> @llvm.experimental.constrained.sqrt.v1f32(
            <1 x float> <float 42.0>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <1 x float> %sqrt
}

define <2 x double> @constrained_vector_sqrt_v2f64() {
; CHECK-LABEL: constrained_vector_sqrt_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vsqrtpd {{.*}}(%rip), %xmm0
; AVX-NEXT: retq
entry:
  %sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
            <2 x double> <double 42.0, double 42.1>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <2 x double> %sqrt
}

define <3 x float> @constrained_vector_sqrt_v3f32() {
; CHECK-LABEL: constrained_vector_sqrt_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm0, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm0, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm2, %xmm2
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vsqrtss %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vsqrtss %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
  %sqrt = call <3 x float> @llvm.experimental.constrained.sqrt.v3f32(
            <3 x float> <float 42.0, float 43.0, float 44.0>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <3 x float> %sqrt
}

define <3 x double> @constrained_vector_sqrt_v3f64() {
; CHECK-LABEL: constrained_vector_sqrt_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: sqrtsd %xmm0, %xmm1
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vsqrtpd {{.*}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %sqrt = call <3 x double> @llvm.experimental.constrained.sqrt.v3f64(
            <3 x double> <double 42.0, double 42.1, double 42.2>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <3 x double> %sqrt
}

define <4 x double> @constrained_vector_sqrt_v4f64() {
; CHECK-LABEL: constrained_vector_sqrt_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vsqrtpd {{.*}}(%rip), %ymm0
; AVX-NEXT: retq
entry:
  %sqrt = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(
            <4 x double> <double 42.0, double 42.1,
                          double 42.2, double 42.3>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <4 x double> %sqrt
}

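; pow has no instruction support, so each element becomes a libcall to
; powf/pow.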
define <1 x float> @constrained_vector_pow_v1f32() {
; CHECK-LABEL: constrained_vector_pow_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq powf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_pow_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq powf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %pow = call <1 x float> @llvm.experimental.constrained.pow.v1f32(
           <1 x float> <float 42.0>,
           <1 x float> <float 3.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %pow
}

define <2 x double> @constrained_vector_pow_v2f64() {
; CHECK-LABEL: constrained_vector_pow_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_pow_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq pow
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq pow
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %pow = call <2 x double> @llvm.experimental.constrained.pow.v2f64(
           <2 x double> <double 42.1, double 42.2>,
           <2 x double> <double 3.0, double 3.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %pow
}

define <3 x float> @constrained_vector_pow_v3f32() {
; CHECK-LABEL: constrained_vector_pow_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq powf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq powf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq powf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_pow_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq powf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq powf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq powf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %pow = call <3 x float> @llvm.experimental.constrained.pow.v3f32(
           <3 x float> <float 42.0, float 43.0, float 44.0>,
           <3 x float> <float 3.0, float 3.0, float 3.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %pow
}

define <3 x double> @constrained_vector_pow_v3f64() {
; CHECK-LABEL: constrained_vector_pow_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_pow_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq pow
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq pow
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq pow
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %pow = call <3 x double> @llvm.experimental.constrained.pow.v3f64(
           <3 x double> <double 42.0, double 42.1, double 42.2>,
           <3 x double> <double 3.0, double 3.0, double 3.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %pow
}

define <4 x double> @constrained_vector_pow_v4f64() {
; CHECK-LABEL: constrained_vector_pow_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_pow_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq pow
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq pow
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq pow
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq pow
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %pow = call <4 x double> @llvm.experimental.constrained.pow.v4f64(
           <4 x double> <double 42.1, double 42.2,
                         double 42.3, double 42.4>,
           <4 x double> <double 3.0, double 3.0,
                         double 3.0, double 3.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %pow
}

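; powi is lowered to the runtime routines __powisf2/__powidf2, with the i32
; exponent passed in %edi.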
define <1 x float> @constrained_vector_powi_v1f32() {
; CHECK-LABEL: constrained_vector_powi_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powisf2
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_powi_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powisf2
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %powi = call <1 x float> @llvm.experimental.constrained.powi.v1f32(
            <1 x float> <float 42.0>,
            i32 3,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <1 x float> %powi
}

define <2 x double> @constrained_vector_powi_v2f64() {
; CHECK-LABEL: constrained_vector_powi_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_powi_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %powi = call <2 x double> @llvm.experimental.constrained.powi.v2f64(
            <2 x double> <double 42.1, double 42.2>,
            i32 3,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <2 x double> %powi
}

define <3 x float> @constrained_vector_powi_v3f32() {
; CHECK-LABEL: constrained_vector_powi_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powisf2
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powisf2
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powisf2
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_powi_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powisf2
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powisf2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powisf2
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %powi = call <3 x float> @llvm.experimental.constrained.powi.v3f32(
            <3 x float> <float 42.0, float 43.0, float 44.0>,
            i32 3,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <3 x float> %powi
}

define <3 x double> @constrained_vector_powi_v3f64() {
; CHECK-LABEL: constrained_vector_powi_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_powi_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %powi = call <3 x double> @llvm.experimental.constrained.powi.v3f64(
            <3 x double> <double 42.0, double 42.1, double 42.2>,
            i32 3,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <3 x double> %powi
}

define <4 x double> @constrained_vector_powi_v4f64() {
; CHECK-LABEL: constrained_vector_powi_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movl $3, %edi
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_powi_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: callq __powidf2
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %powi = call <4 x double> @llvm.experimental.constrained.powi.v4f64(
            <4 x double> <double 42.1, double 42.2,
                          double 42.3, double 42.4>,
            i32 3,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <4 x double> %powi
}

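; sin has no instruction support and is lowered to libcalls to sinf/sin.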
define <1 x float> @constrained_vector_sin_v1f32() {
; CHECK-LABEL: constrained_vector_sin_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq sinf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sin_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq sinf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32(
           <1 x float> <float 42.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %sin
}

define <2 x double> @constrained_vector_sin_v2f64() {
; CHECK-LABEL: constrained_vector_sin_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sin_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq sin
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq sin
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64(
           <2 x double> <double 42.0, double 42.1>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %sin
}

define <3 x float> @constrained_vector_sin_v3f32() {
; CHECK-LABEL: constrained_vector_sin_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq sinf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq sinf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq sinf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sin_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq sinf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq sinf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq sinf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %sin
}
define <3 x double> @constrained_vector_sin_v3f64() {
; CHECK-LABEL: constrained_vector_sin_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sin_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq sin
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq sin
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq sin
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %sin
}
define <4 x double> @constrained_vector_sin_v4f64() {
; CHECK-LABEL: constrained_vector_sin_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq sin
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sin_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq sin
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq sin
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq sin
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq sin
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%sin = call <4 x double> @llvm.experimental.constrained.sin.v4f64(
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %sin
}
define <1 x float> @constrained_vector_cos_v1f32() {
; CHECK-LABEL: constrained_vector_cos_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq cosf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_cos_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq cosf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %cos
}
define <2 x double> @constrained_vector_cos_v2f64() {
; CHECK-LABEL: constrained_vector_cos_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_cos_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq cos
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq cos
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %cos
}
define <3 x float> @constrained_vector_cos_v3f32() {
; CHECK-LABEL: constrained_vector_cos_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq cosf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq cosf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq cosf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_cos_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq cosf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq cosf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq cosf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %cos
}
define <3 x double> @constrained_vector_cos_v3f64() {
; CHECK-LABEL: constrained_vector_cos_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_cos_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq cos
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq cos
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq cos
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %cos
}
define <4 x double> @constrained_vector_cos_v4f64() {
; CHECK-LABEL: constrained_vector_cos_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq cos
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_cos_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq cos
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq cos
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq cos
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq cos
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%cos = call <4 x double> @llvm.experimental.constrained.cos.v4f64(
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %cos
}
define <1 x float> @constrained_vector_exp_v1f32() {
; CHECK-LABEL: constrained_vector_exp_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq expf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq expf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %exp
}
define <2 x double> @constrained_vector_exp_v2f64() {
; CHECK-LABEL: constrained_vector_exp_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %exp
}
define <3 x float> @constrained_vector_exp_v3f32() {
; CHECK-LABEL: constrained_vector_exp_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq expf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq expf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq expf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq expf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq expf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq expf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %exp
}
define <3 x double> @constrained_vector_exp_v3f64() {
; CHECK-LABEL: constrained_vector_exp_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq exp
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %exp
}
define <4 x double> @constrained_vector_exp_v4f64() {
; CHECK-LABEL: constrained_vector_exp_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp = call <4 x double> @llvm.experimental.constrained.exp.v4f64(
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %exp
}
define <1 x float> @constrained_vector_exp2_v1f32() {
; CHECK-LABEL: constrained_vector_exp2_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp2_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq exp2f
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %exp2
}
define <2 x double> @constrained_vector_exp2_v2f64() {
; CHECK-LABEL: constrained_vector_exp2_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp2_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %exp2
}
define <3 x float> @constrained_vector_exp2_v3f32() {
; CHECK-LABEL: constrained_vector_exp2_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp2_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq exp2f
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq exp2f
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq exp2f
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %exp2
}
define <3 x double> @constrained_vector_exp2_v3f64() {
; CHECK-LABEL: constrained_vector_exp2_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp2_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq exp2
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %exp2
}
define <4 x double> @constrained_vector_exp2_v4f64() {
; CHECK-LABEL: constrained_vector_exp2_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_exp2_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp2
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq exp2
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%exp2 = call <4 x double> @llvm.experimental.constrained.exp2.v4f64(
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %exp2
}
define <1 x float> @constrained_vector_log_v1f32() {
; CHECK-LABEL: constrained_vector_log_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq logf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq logf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log = call <1 x float> @llvm.experimental.constrained.log.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %log
}
define <2 x double> @constrained_vector_log_v2f64() {
; CHECK-LABEL: constrained_vector_log_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log = call <2 x double> @llvm.experimental.constrained.log.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %log
}
define <3 x float> @constrained_vector_log_v3f32() {
; CHECK-LABEL: constrained_vector_log_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq logf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq logf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq logf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq logf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq logf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq logf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log = call <3 x float> @llvm.experimental.constrained.log.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %log
}
define <3 x double> @constrained_vector_log_v3f64() {
; CHECK-LABEL: constrained_vector_log_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq log
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log = call <3 x double> @llvm.experimental.constrained.log.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %log
}
define <4 x double> @constrained_vector_log_v4f64() {
; CHECK-LABEL: constrained_vector_log_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log = call <4 x double> @llvm.experimental.constrained.log.v4f64(
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %log
}
define <1 x float> @constrained_vector_log10_v1f32() {
; CHECK-LABEL: constrained_vector_log10_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq log10f
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log10_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq log10f
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %log10
}
define <2 x double> @constrained_vector_log10_v2f64() {
; CHECK-LABEL: constrained_vector_log10_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log10_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log10
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log10
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %log10
}
define <3 x float> @constrained_vector_log10_v3f32() {
; CHECK-LABEL: constrained_vector_log10_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq log10f
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq log10f
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq log10f
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log10_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq log10f
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq log10f
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq log10f
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %log10
}
define <3 x double> @constrained_vector_log10_v3f64() {
; CHECK-LABEL: constrained_vector_log10_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log10_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log10
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log10
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq log10
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %log10
}
define <4 x double> @constrained_vector_log10_v4f64() {
; CHECK-LABEL: constrained_vector_log10_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log10
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log10_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log10
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log10
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log10
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log10
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log10 = call <4 x double> @llvm.experimental.constrained.log10.v4f64(
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %log10
}
define <1 x float> @constrained_vector_log2_v1f32() {
; CHECK-LABEL: constrained_vector_log2_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq log2f
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log2_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq log2f
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %log2
}
define <2 x double> @constrained_vector_log2_v2f64() {
; CHECK-LABEL: constrained_vector_log2_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log2_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %log2
}
define <3 x float> @constrained_vector_log2_v3f32() {
; CHECK-LABEL: constrained_vector_log2_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq log2f
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq log2f
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq log2f
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log2_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq log2f
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq log2f
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq log2f
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %log2
}
define <3 x double> @constrained_vector_log2_v3f64() {
; CHECK-LABEL: constrained_vector_log2_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log2_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq log2
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %log2
}
define <4 x double> @constrained_vector_log2_v4f64() {
; CHECK-LABEL: constrained_vector_log2_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq log2
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_log2_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log2
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log2
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log2
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq log2
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%log2 = call <4 x double> @llvm.experimental.constrained.log2.v4f64(
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %log2
}
define <1 x float> @constrained_vector_rint_v1f32() {
; CHECK-LABEL: constrained_vector_rint_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq rintf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_rint_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %rint
}
define <2 x double> @constrained_vector_rint_v2f64() {
; CHECK-LABEL: constrained_vector_rint_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_rint_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %xmm0
; AVX-NEXT: retq
entry:
%rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %rint
}
define <3 x float> @constrained_vector_rint_v3f32() {
; CHECK-LABEL: constrained_vector_rint_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq rintf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq rintf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq rintf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_rint_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $4, %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $4, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
%rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %rint
}
define <3 x double> @constrained_vector_rint_v3f64() {
; CHECK-LABEL: constrained_vector_rint_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_rint_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %rint
}
define <4 x double> @constrained_vector_rint_v4f64() {
; CHECK-LABEL: constrained_vector_rint_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq rint
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_rint_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %ymm0
; AVX-NEXT: retq
entry:
%rint = call <4 x double> @llvm.experimental.constrained.rint.v4f64(
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %rint
}
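; Verify the nearbyint lowerings. These mirror the rint tests above, but
; nearbyint must not raise an inexact exception: without AVX each element is
; expanded to a nearbyint/nearbyintf libcall, and with AVX the round immediate
; is $12 (current rounding mode, precision exception suppressed) instead of $4.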
define <1 x float> @constrained_vector_nearbyint_v1f32() {
; CHECK-LABEL: constrained_vector_nearbyint_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq nearbyintf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_nearbyint_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %nearby
}
define <2 x double> @constrained_vector_nearbyint_v2f64() {
; CHECK-LABEL: constrained_vector_nearbyint_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_nearbyint_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %xmm0
; AVX-NEXT: retq
entry:
%nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %nearby
}
define <3 x float> @constrained_vector_nearbyint_v3f32() {
; CHECK-LABEL: constrained_vector_nearbyint_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq nearbyintf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq nearbyintf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq nearbyintf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_nearbyint_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $12, %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $12, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
%nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %nearby
}
define <3 x double> @constrained_vector_nearbyint_v3f64() {
; CHECK-LABEL: constrained_vector_nearbyint_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_nearbyint_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %nearby
}
define <4 x double> @constrained_vector_nearbyint_v4f64() {
; CHECK-LABEL: constrained_vector_nearbyint_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_nearbyint_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %ymm0
; AVX-NEXT: retq
entry:
%nearby = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %nearby
}
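; Verify the maxnum lowerings. Constrained maxnum is not matched to a native
; SSE/AVX max pattern here; each element is expanded to an fmax/fmaxf libcall
; on both targets and the result vector is rebuilt with unpck/insert shuffles.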
define <1 x float> @constrained_vector_maxnum_v1f32() {
; CHECK-LABEL: constrained_vector_maxnum_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmaxf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_maxnum_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fmaxf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %max
}
define <2 x double> @constrained_vector_maxnum_v2f64() {
; CHECK-LABEL: constrained_vector_maxnum_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_maxnum_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmax
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmax
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%max = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(
<2 x double> <double 43.0, double 42.0>,
<2 x double> <double 41.0, double 40.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %max
}
define <3 x float> @constrained_vector_maxnum_v3f32() {
; CHECK-LABEL: constrained_vector_maxnum_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmaxf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmaxf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmaxf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_maxnum_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fmaxf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq fmaxf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fmaxf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%max = call <3 x float> @llvm.experimental.constrained.maxnum.v3f32(
<3 x float> <float 43.0, float 44.0, float 45.0>,
<3 x float> <float 41.0, float 42.0, float 43.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %max
}
define <3 x double> @constrained_vector_maxnum_v3f64() {
; CHECK-LABEL: constrained_vector_maxnum_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_maxnum_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmax
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmax
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq fmax
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%max = call <3 x double> @llvm.experimental.constrained.maxnum.v3f64(
<3 x double> <double 43.0, double 44.0, double 45.0>,
<3 x double> <double 40.0, double 41.0, double 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %max
}
define <4 x double> @constrained_vector_maxnum_v4f64() {
; CHECK-LABEL: constrained_vector_maxnum_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_maxnum_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmax
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmax
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmax
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmax
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%max = call <4 x double> @llvm.experimental.constrained.maxnum.v4f64(
<4 x double> <double 44.0, double 45.0,
double 46.0, double 47.0>,
<4 x double> <double 40.0, double 41.0,
double 42.0, double 43.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %max
}
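; Verify the minnum lowerings. These follow the same pattern as the maxnum
; tests above, using the fmin/fminf libcalls.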
define <1 x float> @constrained_vector_minnum_v1f32() {
; CHECK-LABEL: constrained_vector_minnum_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fminf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_minnum_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fminf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %min
}
define <2 x double> @constrained_vector_minnum_v2f64() {
; CHECK-LABEL: constrained_vector_minnum_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_minnum_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmin
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmin
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%min = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(
<2 x double> <double 43.0, double 42.0>,
<2 x double> <double 41.0, double 40.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %min
}
define <3 x float> @constrained_vector_minnum_v3f32() {
; CHECK-LABEL: constrained_vector_minnum_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fminf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fminf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fminf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_minnum_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fminf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq fminf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: callq fminf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%min = call <3 x float> @llvm.experimental.constrained.minnum.v3f32(
<3 x float> <float 43.0, float 44.0, float 45.0>,
<3 x float> <float 41.0, float 42.0, float 43.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %min
}
define <3 x double> @constrained_vector_minnum_v3f64() {
; CHECK-LABEL: constrained_vector_minnum_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_minnum_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmin
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmin
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq fmin
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%min = call <3 x double> @llvm.experimental.constrained.minnum.v3f64(
<3 x double> <double 43.0, double 44.0, double 45.0>,
<3 x double> <double 40.0, double 41.0, double 42.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %min
}
define <4 x double> @constrained_vector_minnum_v4f64() {
; CHECK-LABEL: constrained_vector_minnum_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_minnum_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmin
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmin
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmin
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: callq fmin
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%min = call <4 x double> @llvm.experimental.constrained.minnum.v4f64(
<4 x double> <double 44.0, double 45.0,
double 46.0, double 47.0>,
<4 x double> <double 40.0, double 41.0,
double 42.0, double 43.0>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x double> %min
}
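; Verify the strict fptosi lowerings. Each element is converted with
; cvttss2si or cvttsd2si into a 32- or 64-bit GPR as the result type requires,
; and vector results are reassembled with movd/movq plus punpck, vpinsrd, or
; vinsertf128.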
define <1 x i32> @constrained_vector_fptosi_v1i32_v1f32() {
; CHECK-LABEL: constrained_vector_fptosi_v1i32_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v1i32_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: retq
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(
<1 x float><float 42.0>,
metadata !"fpexcept.strict")
ret <1 x i32> %result
}
define <2 x i32> @constrained_vector_fptosi_v2i32_v2f32() {
; CHECK-LABEL: constrained_vector_fptosi_v2i32_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v2i32_v2f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(
<2 x float><float 42.0, float 43.0>,
metadata !"fpexcept.strict")
ret <2 x i32> %result
}
define <3 x i32> @constrained_vector_fptosi_v3i32_v3f32() {
; CHECK-LABEL: constrained_vector_fptosi_v3i32_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v3i32_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
metadata !"fpexcept.strict")
ret <3 x i32> %result
}
define <4 x i32> @constrained_vector_fptosi_v4i32_v4f32() {
; CHECK-LABEL: constrained_vector_fptosi_v4i32_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm2
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v4i32_v4f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
metadata !"fpexcept.strict")
ret <4 x i32> %result
}
define <1 x i64> @constrained_vector_fptosi_v1i64_v1f32() {
; CHECK-LABEL: constrained_vector_fptosi_v1i64_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v1i64_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: retq
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(
<1 x float><float 42.0>,
metadata !"fpexcept.strict")
ret <1 x i64> %result
}
define <2 x i64> @constrained_vector_fptosi_v2i64_v2f32() {
; CHECK-LABEL: constrained_vector_fptosi_v2i64_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v2i64_v2f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(
<2 x float><float 42.0, float 43.0>,
metadata !"fpexcept.strict")
ret <2 x i64> %result
}
define <3 x i64> @constrained_vector_fptosi_v3i64_v3f32() {
; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rcx
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v3i64_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
%result = call <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
metadata !"fpexcept.strict")
ret <3 x i64> %result
}
define <4 x i64> @constrained_vector_fptosi_v4i64_v4f32() {
; CHECK-LABEL: constrained_vector_fptosi_v4i64_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm2
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v4i64_v4f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm2
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%result = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
metadata !"fpexcept.strict")
ret <4 x i64> %result
}
define <1 x i32> @constrained_vector_fptosi_v1i32_v1f64() {
; CHECK-LABEL: constrained_vector_fptosi_v1i32_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v1i32_v1f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: retq
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(
<1 x double><double 42.1>,
metadata !"fpexcept.strict")
ret <1 x i32> %result
}
define <2 x i32> @constrained_vector_fptosi_v2i32_v2f64() {
; CHECK-LABEL: constrained_vector_fptosi_v2i32_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v2i32_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(
<2 x double><double 42.1, double 42.2>,
metadata !"fpexcept.strict")
ret <2 x i32> %result
}
define <3 x i32> @constrained_vector_fptosi_v3i32_v3f64() {
; CHECK-LABEL: constrained_vector_fptosi_v3i32_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v3i32_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
metadata !"fpexcept.strict")
ret <3 x i32> %result
}
define <4 x i32> @constrained_vector_fptosi_v4i32_v4f64() {
; CHECK-LABEL: constrained_vector_fptosi_v4i32_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm2
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v4i32_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"fpexcept.strict")
ret <4 x i32> %result
}
define <1 x i64> @constrained_vector_fptosi_v1i64_v1f64() {
; CHECK-LABEL: constrained_vector_fptosi_v1i64_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v1i64_v1f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: retq
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(
<1 x double><double 42.1>,
metadata !"fpexcept.strict")
ret <1 x i64> %result
}
define <2 x i64> @constrained_vector_fptosi_v2i64_v2f64() {
; CHECK-LABEL: constrained_vector_fptosi_v2i64_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v2i64_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(
<2 x double><double 42.1, double 42.2>,
metadata !"fpexcept.strict")
ret <2 x i64> %result
}
define <3 x i64> @constrained_vector_fptosi_v3i64_v3f64() {
; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rcx
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v3i64_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
%result = call <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
metadata !"fpexcept.strict")
ret <3 x i64> %result
}
define <4 x i64> @constrained_vector_fptosi_v4i64_v4f64() {
; CHECK-LABEL: constrained_vector_fptosi_v4i64_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm2
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptosi_v4i64_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm2
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%result = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"fpexcept.strict")
ret <4 x i64> %result
}
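; Verify the strict fptoui lowerings. These currently check the same
; cvttss2si/cvttsd2si based scalarized lowering as the fptosi tests above.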
define <1 x i32> @constrained_vector_fptoui_v1i32_v1f32() {
; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v1i32_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: retq
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(
<1 x float><float 42.0>,
metadata !"fpexcept.strict")
ret <1 x i32> %result
}
define <2 x i32> @constrained_vector_fptoui_v2i32_v2f32() {
; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v2i32_v2f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(
<2 x float><float 42.0, float 43.0>,
metadata !"fpexcept.strict")
ret <2 x i32> %result
}
define <3 x i32> @constrained_vector_fptoui_v3i32_v3f32() {
; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v3i32_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
metadata !"fpexcept.strict")
ret <3 x i32> %result
}
define <4 x i32> @constrained_vector_fptoui_v4i32_v4f32() {
; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm2
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v4i32_v4f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
metadata !"fpexcept.strict")
ret <4 x i32> %result
}
define <1 x i64> @constrained_vector_fptoui_v1i64_v1f32() {
; CHECK-LABEL: constrained_vector_fptoui_v1i64_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v1i64_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: retq
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(
<1 x float><float 42.0>,
metadata !"fpexcept.strict")
ret <1 x i64> %result
}
define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() {
; CHECK-LABEL: constrained_vector_fptoui_v2i64_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v2i64_v2f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(
<2 x float><float 42.0, float 43.0>,
metadata !"fpexcept.strict")
ret <2 x i64> %result
}
define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() {
; CHECK-LABEL: constrained_vector_fptoui_v3i64_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rcx
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v3i64_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
%result = call <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
metadata !"fpexcept.strict")
ret <3 x i64> %result
}
define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() {
; CHECK-LABEL: constrained_vector_fptoui_v4i64_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm2
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v4i64_v4f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm2
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%result = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
metadata !"fpexcept.strict")
ret <4 x i64> %result
}
define <1 x i32> @constrained_vector_fptoui_v1i32_v1f64() {
; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v1i32_v1f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: retq
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(
<1 x double><double 42.1>,
metadata !"fpexcept.strict")
ret <1 x i32> %result
}
define <2 x i32> @constrained_vector_fptoui_v2i32_v2f64() {
; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v2i32_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(
<2 x double><double 42.1, double 42.2>,
metadata !"fpexcept.strict")
ret <2 x i32> %result
}
define <3 x i32> @constrained_vector_fptoui_v3i32_v3f64() {
; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v3i32_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
metadata !"fpexcept.strict")
ret <3 x i32> %result
}
define <4 x i32> @constrained_vector_fptoui_v4i32_v4f64() {
; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm2
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v4i32_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"fpexcept.strict")
ret <4 x i32> %result
}
define <1 x i64> @constrained_vector_fptoui_v1i64_v1f64() {
; CHECK-LABEL: constrained_vector_fptoui_v1i64_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v1i64_v1f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: retq
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(
<1 x double><double 42.1>,
metadata !"fpexcept.strict")
ret <1 x i64> %result
}
define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() {
; CHECK-LABEL: constrained_vector_fptoui_v2i64_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v2i64_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(
<2 x double><double 42.1, double 42.2>,
metadata !"fpexcept.strict")
ret <2 x i64> %result
}
define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() {
; CHECK-LABEL: constrained_vector_fptoui_v3i64_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rcx
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v3i64_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
%result = call <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
metadata !"fpexcept.strict")
ret <3 x i64> %result
}
define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() {
; CHECK-LABEL: constrained_vector_fptoui_v4i64_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm2
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptoui_v4i64_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX-NEXT: vmovq %rax, %xmm2
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%result = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"fpexcept.strict")
ret <4 x i64> %result
}
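; Verify the fptrunc (double to float) lowerings. The conversions are
; scalarized through cvtsd2ss, except for the v4f64 case where AVX folds the
; whole conversion into a single vcvtpd2psy load.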
define <1 x float> @constrained_vector_fptrunc_v1f64() {
; CHECK-LABEL: constrained_vector_fptrunc_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptrunc_v1f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(
<1 x double><double 42.1>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %result
}

define <2 x float> @constrained_vector_fptrunc_v2f64() {
; CHECK-LABEL: constrained_vector_fptrunc_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm1
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptrunc_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: retq
entry:
%result = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(
<2 x double><double 42.1, double 42.2>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x float> %result
}

define <3 x float> @constrained_vector_fptrunc_v3f64() {
; CHECK-LABEL: constrained_vector_fptrunc_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm1
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm1, %xmm1
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptrunc_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; AVX-NEXT: retq
entry:
%result = call <3 x float> @llvm.experimental.constrained.fptrunc.v3f32.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %result
}

define <4 x float> @constrained_vector_fptrunc_v4f64() {
; CHECK-LABEL: constrained_vector_fptrunc_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm1, %xmm1
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm2
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fptrunc_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtpd2psy {{.*}}(%rip), %xmm0
; AVX-NEXT: retq
entry:
%result = call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <4 x float> %result
}
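
; Verify the lowering of constrained fpext (float -> double) for 1-4 element vectors.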
define <1 x double> @constrained_vector_fpext_v1f32() {
; CHECK-LABEL: constrained_vector_fpext_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fpext_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(
<1 x float><float 42.0>,
metadata !"fpexcept.strict")
ret <1 x double> %result
}

define <2 x double> @constrained_vector_fpext_v2f32() {
; CHECK-LABEL: constrained_vector_fpext_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fpext_v2f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
entry:
%result = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(
<2 x float><float 42.0, float 43.0>,
metadata !"fpexcept.strict")
ret <2 x double> %result
}

define <3 x double> @constrained_vector_fpext_v3f32() {
; CHECK-LABEL: constrained_vector_fpext_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm2, %xmm2
; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fpext_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
%result = call <3 x double> @llvm.experimental.constrained.fpext.v3f64.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
metadata !"fpexcept.strict")
ret <3 x double> %result
}

define <4 x double> @constrained_vector_fpext_v4f32() {
; CHECK-LABEL: constrained_vector_fpext_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm1, %xmm2
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm1, %xmm1
; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fpext_v4f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtps2pd {{.*}}(%rip), %ymm0
; AVX-NEXT: retq
entry:
%result = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
metadata !"fpexcept.strict")
ret <4 x double> %result
}
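
; Verify the lowering of constrained ceil: libcalls to ceilf/ceil without SSE4.1, vroundss/vroundpd with immediate 10 (round toward +inf) under AVX.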
define <1 x float> @constrained_vector_ceil_v1f32() {
; CHECK-LABEL: constrained_vector_ceil_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq ceilf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_ceil_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%ceil = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %ceil
}

define <2 x double> @constrained_vector_ceil_v2f64() {
; CHECK-LABEL: constrained_vector_ceil_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq ceil
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq ceil
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_ceil_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vroundpd $10, {{.*}}(%rip), %xmm0
; AVX-NEXT: retq
entry:
%ceil = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %ceil
}

define <3 x float> @constrained_vector_ceil_v3f32() {
; CHECK-LABEL: constrained_vector_ceil_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq ceilf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq ceilf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq ceilf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_ceil_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $10, %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $10, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
%ceil = call <3 x float> @llvm.experimental.constrained.ceil.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %ceil
}

define <3 x double> @constrained_vector_ceil_v3f64() {
; CHECK-LABEL: constrained_vector_ceil_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq ceil
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq ceil
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq ceil
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_ceil_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vroundsd $10, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $10, {{.*}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%ceil = call <3 x double> @llvm.experimental.constrained.ceil.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %ceil
}
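
; Verify the lowering of constrained floor: libcalls to floorf/floor without SSE4.1, vroundss/vroundpd with immediate 9 (round toward -inf) under AVX.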
define <1 x float> @constrained_vector_floor_v1f32() {
; CHECK-LABEL: constrained_vector_floor_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq floorf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_floor_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%floor = call <1 x float> @llvm.experimental.constrained.floor.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %floor
}

define <2 x double> @constrained_vector_floor_v2f64() {
; CHECK-LABEL: constrained_vector_floor_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq floor
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq floor
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_floor_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vroundpd $9, {{.*}}(%rip), %xmm0
; AVX-NEXT: retq
entry:
%floor = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %floor
}

define <3 x float> @constrained_vector_floor_v3f32() {
; CHECK-LABEL: constrained_vector_floor_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq floorf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq floorf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq floorf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_floor_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $9, %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $9, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
%floor = call <3 x float> @llvm.experimental.constrained.floor.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %floor
}

define <3 x double> @constrained_vector_floor_v3f64() {
; CHECK-LABEL: constrained_vector_floor_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq floor
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq floor
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq floor
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_floor_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vroundsd $9, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $9, {{.*}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%floor = call <3 x double> @llvm.experimental.constrained.floor.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %floor
}
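
; Verify the lowering of constrained round. Rounding halfway cases away from zero has no vroundss/vroundpd encoding, so both configurations call roundf/round.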
define <1 x float> @constrained_vector_round_v1f32() {
; CHECK-LABEL: constrained_vector_round_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq roundf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_round_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq roundf
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%round = call <1 x float> @llvm.experimental.constrained.round.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %round
}

define <2 x double> @constrained_vector_round_v2f64() {
; CHECK-LABEL: constrained_vector_round_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq round
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq round
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_round_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 32
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq round
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq round
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%round = call <2 x double> @llvm.experimental.constrained.round.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %round
}

define <3 x float> @constrained_vector_round_v3f32() {
; CHECK-LABEL: constrained_vector_round_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq roundf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq roundf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq roundf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_round_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq roundf
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq roundf
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: callq roundf
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%round = call <3 x float> @llvm.experimental.constrained.round.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %round
}

define <3 x double> @constrained_vector_round_v3f64() {
; CHECK-LABEL: constrained_vector_round_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq round
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq round
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq round
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_round_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 64
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq round
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: callq round
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq round
; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
%round = call <3 x double> @llvm.experimental.constrained.round.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %round
}
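
; Verify the lowering of constrained trunc: libcalls to truncf/trunc without SSE4.1, vroundss/vroundpd with immediate 11 (truncate toward zero) under AVX.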
define <1 x float> @constrained_vector_trunc_v1f32() {
; CHECK-LABEL: constrained_vector_trunc_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq truncf
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_trunc_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <1 x float> %trunc
}

define <2 x double> @constrained_vector_trunc_v2f64() {
; CHECK-LABEL: constrained_vector_trunc_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq trunc
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq trunc
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_trunc_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vroundpd $11, {{.*}}(%rip), %xmm0
; AVX-NEXT: retq
entry:
%trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <2 x double> %trunc
}

define <3 x float> @constrained_vector_trunc_v3f32() {
; CHECK-LABEL: constrained_vector_trunc_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq truncf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq truncf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq truncf
; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_trunc_v3f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $11, %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vroundss $11, %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
%trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x float> %trunc
}

define <3 x double> @constrained_vector_trunc_v3f64() {
; CHECK-LABEL: constrained_vector_trunc_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq trunc
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq trunc
; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: callq trunc
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_trunc_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; AVX-NEXT: vroundpd $11, {{.*}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
%trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret <3 x double> %trunc
}

; Single width declarations
declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.frem.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.pow.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.powi.v2f64(<2 x double>, i32, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sin.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.cos.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.exp.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.exp2.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log10.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log2.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(<2 x float>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata, metadata)

; Scalar width declarations
declare <1 x float> @llvm.experimental.constrained.fadd.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.fsub.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.fdiv.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.frem.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.sqrt.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.pow.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.powi.v1f32(<1 x float>, i32, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.sin.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.cos.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.exp.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.exp2.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.log.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.log10.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.log2.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.rint.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.maxnum.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.minnum.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(<1 x float>, metadata)
declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(<1 x float>, metadata)
declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata)
declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata)
declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(<1 x float>, metadata)
declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(<1 x float>, metadata)
declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata)
declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata)
declare <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(<1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float>, metadata)
declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata, metadata)

; Illegal width declarations
declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.fsub.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fsub.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.fmul.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fmul.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.fdiv.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fdiv.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.frem.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.frem.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.sqrt.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.sqrt.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.pow.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.pow.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.powi.v3f32(<3 x float>, i32, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.powi.v3f64(<3 x double>, i32, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.sin.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.sin.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.cos.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.cos.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.exp.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.exp.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.exp2.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.exp2.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.log.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.log.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.log10.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.log10.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.log2.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.log2.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.rint.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.rint.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.maxnum.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.maxnum.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.minnum.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.minnum.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f32(<3 x float>, metadata)
declare <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f32(<3 x float>, metadata)
declare <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f64(<3 x double>, metadata)
declare <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f64(<3 x double>, metadata)
declare <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f32(<3 x float>, metadata)
declare <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f32(<3 x float>, metadata)
declare <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f64(<3 x double>, metadata)
declare <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f64(<3 x double>, metadata)
declare <3 x float> @llvm.experimental.constrained.fptrunc.v3f32.v3f64(<3 x double>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fpext.v3f64.v3f32(<3 x float>, metadata)
declare <3 x float> @llvm.experimental.constrained.ceil.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.ceil.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata, metadata)

; Double width declarations
declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.frem.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.pow.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.powi.v4f64(<4 x double>, i32, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.sin.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.cos.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.exp.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.exp2.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.log.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.log10.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.log2.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.maxnum.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.minnum.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(<4 x double>, metadata)
declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double>, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double>, metadata)
declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(<4 x double>, metadata)
declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata, metadata)