; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
; Constrained vector fdiv of two constant <2 x double> vectors should lower to
; a single packed divpd with a constant-pool operand.
define <2 x double> @constrained_vector_fdiv() {
; CHECK-LABEL: constrained_vector_fdiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.000000e+00,2.000000e+00]
; CHECK-NEXT:    divpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %div = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
           <2 x double> <double 1.000000e+00, double 2.000000e+00>,
           <2 x double> <double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %div
}
; Constrained vector fmul with DBL_MAX operands should lower to a packed mulpd;
; strict-FP semantics forbid constant folding the possibly-overflowing multiply.
define <2 x double> @constrained_vector_fmul(<2 x double> %a) {
; CHECK-LABEL: constrained_vector_fmul:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.797693e+308,1.797693e+308]
; CHECK-NEXT:    mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %mul = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
           <2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <2 x double> <double 2.000000e+00, double 3.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %mul
}
; Constrained vector fadd with DBL_MAX operands should lower to a packed addpd;
; strict-FP semantics forbid folding the inexact/overflowing add at compile time.
define <2 x double> @constrained_vector_fadd() {
; CHECK-LABEL: constrained_vector_fadd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.797693e+308,1.797693e+308]
; CHECK-NEXT:    addpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %add = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
           <2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <2 x double> <double 1.000000e+00, double 1.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %add
}
; Constrained vector fsub with -DBL_MAX operands should lower to a packed subpd;
; strict-FP semantics forbid folding the inexact/overflowing subtract.
define <2 x double> @constrained_vector_fsub() {
; CHECK-LABEL: constrained_vector_fsub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [-1.797693e+308,-1.797693e+308]
; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
           <2 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
           <2 x double> <double 1.000000e+00, double 1.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %sub
}
; Constrained vector sqrt should lower directly to sqrtpd with a memory operand.
define <2 x double> @constrained_vector_sqrt() {
; CHECK-LABEL: constrained_vector_sqrt:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
            <2 x double> <double 42.0, double 42.1>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <2 x double> %sqrt
}
; Constrained vector pow has no packed instruction; it should scalarize into two
; libcalls to pow, spilling one result and recombining with unpcklpd.
define <2 x double> @constrained_vector_pow() {
; CHECK-LABEL: constrained_vector_pow:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %pow = call <2 x double> @llvm.experimental.constrained.pow.v2f64(
           <2 x double> <double 42.1, double 42.2>,
           <2 x double> <double 3.0, double 3.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %pow
}
; Constrained vector sin scalarizes into two libcalls to sin, recombined with
; unpcklpd.
define <2 x double> @constrained_vector_sin() {
; CHECK-LABEL: constrained_vector_sin:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64(
           <2 x double> <double 42.0, double 42.1>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %sin
}
; Constrained vector cos scalarizes into two libcalls to cos, recombined with
; unpcklpd.
define <2 x double> @constrained_vector_cos() {
; CHECK-LABEL: constrained_vector_cos:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64(
           <2 x double> <double 42.0, double 42.1>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %cos
}
; Constrained vector exp scalarizes into two libcalls to exp, recombined with
; unpcklpd.
define <2 x double> @constrained_vector_exp() {
; CHECK-LABEL: constrained_vector_exp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64(
           <2 x double> <double 42.0, double 42.1>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %exp
}
; Constrained vector exp2 scalarizes into two libcalls to exp2, recombined with
; unpcklpd.
define <2 x double> @constrained_vector_exp2() {
; CHECK-LABEL: constrained_vector_exp2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64(
            <2 x double> <double 42.1, double 42.0>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <2 x double> %exp2
}
; Constrained vector log scalarizes into two libcalls to log, recombined with
; unpcklpd.
define <2 x double> @constrained_vector_log() {
; CHECK-LABEL: constrained_vector_log:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log = call <2 x double> @llvm.experimental.constrained.log.v2f64(
           <2 x double> <double 42.0, double 42.1>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %log
}
; Constrained vector log10 scalarizes into two libcalls to log10, recombined
; with unpcklpd.
define <2 x double> @constrained_vector_log10() {
; CHECK-LABEL: constrained_vector_log10:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64(
             <2 x double> <double 42.0, double 42.1>,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <2 x double> %log10
}
; Constrained vector log2 scalarizes into two libcalls to log2, recombined with
; unpcklpd.
define <2 x double> @constrained_vector_log2() {
; CHECK-LABEL: constrained_vector_log2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log2
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log2
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64(
            <2 x double> <double 42.0, double 42.1>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <2 x double> %log2
}
; Constrained vector rint (without SSE4.1 roundpd available at this target
; level) scalarizes into two libcalls to rint, recombined with unpcklpd.
define <2 x double> @constrained_vector_rint() {
; CHECK-LABEL: constrained_vector_rint:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq rint
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq rint
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
            <2 x double> <double 42.1, double 42.0>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <2 x double> %rint
}
; Constrained vector nearbyint scalarizes into two libcalls to nearbyint,
; recombined with unpcklpd.
define <2 x double> @constrained_vector_nearbyint() {
; CHECK-LABEL: constrained_vector_nearbyint:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
              <2 x double> <double 42.1, double 42.0>,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict")
  ret <2 x double> %nearby
}
; Declarations of the experimental constrained FP intrinsics exercised above.
; Note: @llvm.experimental.constrained.sqrt.v4f64 is declared but not used in
; this chunk; it may be used elsewhere in the full file, so it is kept.
declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.pow.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sin.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.cos.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.exp.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.exp2.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log10.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log2.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)