[X86][SSE] Regenerated vector float tests - fabs / floor(etc.) / fneg / float2double
llvm-svn: 265186
commit 3243b21dae (parent 1e5bf0a256)
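A note on provenance (an editorial sketch, not part of the diff): the autogenerated assertion blocks below are emitted by the script named in the NOTE lines, one check prefix per RUN configuration, with a prefix shared where two RUN lines produce identical output. A typical regeneration step, assuming llc is on PATH and with an illustrative test path, looks like:

utils/update_llc_test_checks.py test/CodeGen/X86/vec_fabs.ll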
@@ -1,37 +1,64 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64

define <2 x double> @fabs_v2f64(<2 x double> %p)
{
; CHECK-LABEL: fabs_v2f64
; CHECK: vandpd
define <2 x double> @fabs_v2f64(<2 x double> %p) {
; X32-LABEL: fabs_v2f64:
; X32: # BB#0:
; X32-NEXT: vandpd .LCPI0_0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: fabs_v2f64:
; X64: # BB#0:
; X64-NEXT: vandpd {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: retq
%t = call <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
ret <2 x double> %t
}
declare <2 x double> @llvm.fabs.v2f64(<2 x double> %p)

define <4 x float> @fabs_v4f32(<4 x float> %p)
{
; CHECK-LABEL: fabs_v4f32
; CHECK: vandps
define <4 x float> @fabs_v4f32(<4 x float> %p) {
; X32-LABEL: fabs_v4f32:
; X32: # BB#0:
; X32-NEXT: vandps .LCPI1_0, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: fabs_v4f32:
; X64: # BB#0:
; X64-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: retq
%t = call <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
ret <4 x float> %t
}
declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p)

define <4 x double> @fabs_v4f64(<4 x double> %p)
{
; CHECK-LABEL: fabs_v4f64
; CHECK: vandpd
define <4 x double> @fabs_v4f64(<4 x double> %p) {
; X32-LABEL: fabs_v4f64:
; X32: # BB#0:
; X32-NEXT: vandpd .LCPI2_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: fabs_v4f64:
; X64: # BB#0:
; X64-NEXT: vandpd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%t = call <4 x double> @llvm.fabs.v4f64(<4 x double> %p)
ret <4 x double> %t
}
declare <4 x double> @llvm.fabs.v4f64(<4 x double> %p)

define <8 x float> @fabs_v8f32(<8 x float> %p)
{
; CHECK-LABEL: fabs_v8f32
; CHECK: vandps
define <8 x float> @fabs_v8f32(<8 x float> %p) {
; X32-LABEL: fabs_v8f32:
; X32: # BB#0:
; X32-NEXT: vandps .LCPI3_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: fabs_v8f32:
; X64: # BB#0:
; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%t = call <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
ret <8 x float> %t
}

@@ -44,7 +71,7 @@ declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
; that has the sign bits turned off.
;
; So instead of something like this:
; movabsq (constant pool load of mask for sign bits)
; movabsq (constant pool load of mask for sign bits)
; vmovq (move from integer register to vector/fp register)
; vandps (mask off sign bits)
; vmovq (move vector/fp register back to integer return register)
@@ -53,9 +80,16 @@ declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
; mov (put constant value in return register)
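To make the immediates in the next two tests easy to verify, here is the per-lane mask arithmetic (a sketch; the values come straight from the IR below):

; input:     0xFFFFFFFF00000000  (= 18446744069414584320)
; fabs mask: 0x7FFFFFFF per f32 lane, i.e. 0x7FFFFFFF7FFFFFFF over the i64
; result:    0xFFFFFFFF00000000 & 0x7FFFFFFF7FFFFFFF = 0x7FFFFFFF00000000
;            (= 9223372032559808512, the movabsq immediate checked below)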

define i64 @fabs_v2f32_1() {
; CHECK-LABEL: fabs_v2f32_1:
; CHECK: movabsq $9223372032559808512, %rax # imm = 0x7FFFFFFF00000000
; CHECK-NEXT: retq
; X32-LABEL: fabs_v2f32_1:
; X32: # BB#0:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X32-NEXT: retl
;
; X64-LABEL: fabs_v2f32_1:
; X64: # BB#0:
; X64-NEXT: movabsq $9223372032559808512, %rax # imm = 0x7FFFFFFF00000000
; X64-NEXT: retq
%bitcast = bitcast i64 18446744069414584320 to <2 x float> ; 0xFFFF_FFFF_0000_0000
%fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %bitcast)
%ret = bitcast <2 x float> %fabs to i64
@@ -63,9 +97,16 @@ define i64 @fabs_v2f32_1() {
}

define i64 @fabs_v2f32_2() {
; CHECK-LABEL: fabs_v2f32_2:
; CHECK: movl $2147483647, %eax # imm = 0x7FFFFFFF
; CHECK-NEXT: retq
; X32-LABEL: fabs_v2f32_2:
; X32: # BB#0:
; X32-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
;
; X64-LABEL: fabs_v2f32_2:
; X64: # BB#0:
; X64-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
; X64-NEXT: retq
%bitcast = bitcast i64 4294967295 to <2 x float> ; 0x0000_0000_FFFF_FFFF
%fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %bitcast)
%ret = bitcast <2 x float> %fabs to i64

@@ -1,181 +1,312 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7-avx | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=AVX

define <2 x double> @floor_v2f64(<2 x double> %p)
{
; CHECK: floor_v2f64
; CHECK: vroundpd
define <2 x double> @floor_v2f64(<2 x double> %p) {
; SSE41-LABEL: floor_v2f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v2f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $9, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <2 x double> @llvm.floor.v2f64(<2 x double> %p)
ret <2 x double> %t
}
declare <2 x double> @llvm.floor.v2f64(<2 x double> %p)

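The round* immediates used throughout this file are the SSE4.1 rounding-control byte; a brief decode for reference (standard imm8 layout for roundps/roundpd):

; imm[1:0] rounding mode: 00 = nearest, 01 = down, 10 = up, 11 = truncate
; imm[2]   set = take the rounding mode from MXCSR.RC instead of imm[1:0]
; imm[3]   set = suppress the precision (inexact) exception
;
; $9  = 0b1001: round down, inexact suppressed   -> floor
; $10 = 0b1010: round up, inexact suppressed     -> ceil
; $11 = 0b1011: truncate, inexact suppressed     -> trunc
; $4  = 0b0100: MXCSR mode, inexact reported     -> rint
; $12 = 0b1100: MXCSR mode, inexact suppressed   -> nearbyint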
define <4 x float> @floor_v4f32(<4 x float> %p)
{
; CHECK: floor_v4f32
; CHECK: vroundps
define <4 x float> @floor_v4f32(<4 x float> %p) {
; SSE41-LABEL: floor_v4f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v4f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $9, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <4 x float> @llvm.floor.v4f32(<4 x float> %p)
ret <4 x float> %t
}
declare <4 x float> @llvm.floor.v4f32(<4 x float> %p)

define <4 x double> @floor_v4f64(<4 x double> %p)
{
; CHECK: floor_v4f64
; CHECK: vroundpd
define <4 x double> @floor_v4f64(<4 x double> %p) {
; SSE41-LABEL: floor_v4f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $9, %xmm0, %xmm0
; SSE41-NEXT: roundpd $9, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v4f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $9, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <4 x double> @llvm.floor.v4f64(<4 x double> %p)
ret <4 x double> %t
}
declare <4 x double> @llvm.floor.v4f64(<4 x double> %p)

define <8 x float> @floor_v8f32(<8 x float> %p)
{
; CHECK: floor_v8f32
; CHECK: vroundps
define <8 x float> @floor_v8f32(<8 x float> %p) {
; SSE41-LABEL: floor_v8f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $9, %xmm0, %xmm0
; SSE41-NEXT: roundps $9, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: floor_v8f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $9, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <8 x float> @llvm.floor.v8f32(<8 x float> %p)
ret <8 x float> %t
}
declare <8 x float> @llvm.floor.v8f32(<8 x float> %p)

define <2 x double> @ceil_v2f64(<2 x double> %p)
{
; CHECK: ceil_v2f64
; CHECK: vroundpd
define <2 x double> @ceil_v2f64(<2 x double> %p) {
; SSE41-LABEL: ceil_v2f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v2f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $10, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> %p)
ret <2 x double> %t
}
declare <2 x double> @llvm.ceil.v2f64(<2 x double> %p)

define <4 x float> @ceil_v4f32(<4 x float> %p)
{
; CHECK: ceil_v4f32
; CHECK: vroundps
define <4 x float> @ceil_v4f32(<4 x float> %p) {
; SSE41-LABEL: ceil_v4f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v4f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $10, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> %p)
ret <4 x float> %t
}
declare <4 x float> @llvm.ceil.v4f32(<4 x float> %p)

define <4 x double> @ceil_v4f64(<4 x double> %p)
{
; CHECK: ceil_v4f64
; CHECK: vroundpd
define <4 x double> @ceil_v4f64(<4 x double> %p) {
; SSE41-LABEL: ceil_v4f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $10, %xmm0, %xmm0
; SSE41-NEXT: roundpd $10, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v4f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $10, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <4 x double> @llvm.ceil.v4f64(<4 x double> %p)
ret <4 x double> %t
}
declare <4 x double> @llvm.ceil.v4f64(<4 x double> %p)

define <8 x float> @ceil_v8f32(<8 x float> %p)
{
; CHECK: ceil_v8f32
; CHECK: vroundps
define <8 x float> @ceil_v8f32(<8 x float> %p) {
; SSE41-LABEL: ceil_v8f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $10, %xmm0, %xmm0
; SSE41-NEXT: roundps $10, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: ceil_v8f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $10, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <8 x float> @llvm.ceil.v8f32(<8 x float> %p)
ret <8 x float> %t
}
declare <8 x float> @llvm.ceil.v8f32(<8 x float> %p)

define <2 x double> @trunc_v2f64(<2 x double> %p)
{
; CHECK: trunc_v2f64
; CHECK: vroundpd
define <2 x double> @trunc_v2f64(<2 x double> %p) {
; SSE41-LABEL: trunc_v2f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v2f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> %p)
ret <2 x double> %t
}
declare <2 x double> @llvm.trunc.v2f64(<2 x double> %p)

define <4 x float> @trunc_v4f32(<4 x float> %p)
{
; CHECK: trunc_v4f32
; CHECK: vroundps
define <4 x float> @trunc_v4f32(<4 x float> %p) {
; SSE41-LABEL: trunc_v4f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v4f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $11, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> %p)
ret <4 x float> %t
}
declare <4 x float> @llvm.trunc.v4f32(<4 x float> %p)

define <4 x double> @trunc_v4f64(<4 x double> %p)
{
; CHECK: trunc_v4f64
; CHECK: vroundpd
define <4 x double> @trunc_v4f64(<4 x double> %p) {
; SSE41-LABEL: trunc_v4f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v4f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <4 x double> @llvm.trunc.v4f64(<4 x double> %p)
ret <4 x double> %t
}
declare <4 x double> @llvm.trunc.v4f64(<4 x double> %p)

define <8 x float> @trunc_v8f32(<8 x float> %p)
{
; CHECK: trunc_v8f32
; CHECK: vroundps
define <8 x float> @trunc_v8f32(<8 x float> %p) {
; SSE41-LABEL: trunc_v8f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: roundps $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_v8f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $11, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <8 x float> @llvm.trunc.v8f32(<8 x float> %p)
ret <8 x float> %t
}
declare <8 x float> @llvm.trunc.v8f32(<8 x float> %p)

define <2 x double> @rint_v2f64(<2 x double> %p)
{
; CHECK: rint_v2f64
; CHECK: vroundpd
define <2 x double> @rint_v2f64(<2 x double> %p) {
; SSE41-LABEL: rint_v2f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v2f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $4, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <2 x double> @llvm.rint.v2f64(<2 x double> %p)
ret <2 x double> %t
}
declare <2 x double> @llvm.rint.v2f64(<2 x double> %p)

define <4 x float> @rint_v4f32(<4 x float> %p)
{
; CHECK: rint_v4f32
; CHECK: vroundps
define <4 x float> @rint_v4f32(<4 x float> %p) {
; SSE41-LABEL: rint_v4f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v4f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $4, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <4 x float> @llvm.rint.v4f32(<4 x float> %p)
ret <4 x float> %t
}
declare <4 x float> @llvm.rint.v4f32(<4 x float> %p)

define <4 x double> @rint_v4f64(<4 x double> %p)
{
; CHECK: rint_v4f64
; CHECK: vroundpd
define <4 x double> @rint_v4f64(<4 x double> %p) {
; SSE41-LABEL: rint_v4f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $4, %xmm0, %xmm0
; SSE41-NEXT: roundpd $4, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v4f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $4, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <4 x double> @llvm.rint.v4f64(<4 x double> %p)
ret <4 x double> %t
}
declare <4 x double> @llvm.rint.v4f64(<4 x double> %p)

define <8 x float> @rint_v8f32(<8 x float> %p)
{
; CHECK: rint_v8f32
; CHECK: vroundps
define <8 x float> @rint_v8f32(<8 x float> %p) {
; SSE41-LABEL: rint_v8f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $4, %xmm0, %xmm0
; SSE41-NEXT: roundps $4, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: rint_v8f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $4, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <8 x float> @llvm.rint.v8f32(<8 x float> %p)
ret <8 x float> %t
}
declare <8 x float> @llvm.rint.v8f32(<8 x float> %p)

define <2 x double> @nearbyint_v2f64(<2 x double> %p)
{
; CHECK: nearbyint_v2f64
; CHECK: vroundpd
define <2 x double> @nearbyint_v2f64(<2 x double> %p) {
; SSE41-LABEL: nearbyint_v2f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v2f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $12, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)
ret <2 x double> %t
}
declare <2 x double> @llvm.nearbyint.v2f64(<2 x double> %p)

define <4 x float> @nearbyint_v4f32(<4 x float> %p)
{
; CHECK: nearbyint_v4f32
; CHECK: vroundps
define <4 x float> @nearbyint_v4f32(<4 x float> %p) {
; SSE41-LABEL: nearbyint_v4f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v4f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $12, %xmm0, %xmm0
; AVX-NEXT: retq
%t = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)
ret <4 x float> %t
}
declare <4 x float> @llvm.nearbyint.v4f32(<4 x float> %p)

define <4 x double> @nearbyint_v4f64(<4 x double> %p)
{
; CHECK: nearbyint_v4f64
; CHECK: vroundpd
define <4 x double> @nearbyint_v4f64(<4 x double> %p) {
; SSE41-LABEL: nearbyint_v4f64:
; SSE41: ## BB#0:
; SSE41-NEXT: roundpd $12, %xmm0, %xmm0
; SSE41-NEXT: roundpd $12, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v4f64:
; AVX: ## BB#0:
; AVX-NEXT: vroundpd $12, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)
ret <4 x double> %t
}
declare <4 x double> @llvm.nearbyint.v4f64(<4 x double> %p)

define <8 x float> @nearbyint_v8f32(<8 x float> %p)
{
; CHECK: nearbyint_v8f32
; CHECK: vroundps
define <8 x float> @nearbyint_v8f32(<8 x float> %p) {
; SSE41-LABEL: nearbyint_v8f32:
; SSE41: ## BB#0:
; SSE41-NEXT: roundps $12, %xmm0, %xmm0
; SSE41-NEXT: roundps $12, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: nearbyint_v8f32:
; AVX: ## BB#0:
; AVX-NEXT: vroundps $12, %ymm0, %ymm0
; AVX-NEXT: retq
%t = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
ret <8 x float> %t
}

@@ -186,43 +317,85 @@ declare <8 x float> @llvm.nearbyint.v8f32(<8 x float> %p)
;

define <2 x double> @const_floor_v2f64() {
; CHECK: const_floor_v2f64
; CHECK: movaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; SSE41-LABEL: const_floor_v2f64:
; SSE41: ## BB#0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_floor_v2f64:
; AVX: ## BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-2.000000e+00,2.000000e+00]
; AVX-NEXT: retq
%t = call <2 x double> @llvm.floor.v2f64(<2 x double> <double -1.5, double 2.5>)
ret <2 x double> %t
}
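The folded constant above is just floor applied per lane (worked out for clarity):

; floor(-1.5) = -2.0 and floor(2.5) = 2.0, so
; llvm.floor.v2f64(<-1.5, 2.5>) folds to <-2.0, 2.0>, the movaps constant.
; The ceil and trunc cases below fold the same way under their own rounding rules.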

define <4 x float> @const_floor_v4f32() {
; CHECK: const_floor_v4f32
; CHECK: movaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; SSE41-LABEL: const_floor_v4f32:
; SSE41: ## BB#0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_floor_v4f32:
; AVX: ## BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-4.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX-NEXT: retq
%t = call <4 x float> @llvm.floor.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
ret <4 x float> %t
}

define <2 x double> @const_ceil_v2f64() {
; CHECK: const_ceil_v2f64
; CHECK: movaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; SSE41-LABEL: const_ceil_v2f64:
; SSE41: ## BB#0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_ceil_v2f64:
; AVX: ## BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,3.000000e+00]
; AVX-NEXT: retq
%t = call <2 x double> @llvm.ceil.v2f64(<2 x double> <double -1.5, double 2.5>)
ret <2 x double> %t
}

define <4 x float> @const_ceil_v4f32() {
; CHECK: const_ceil_v4f32
; CHECK: movaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; SSE41-LABEL: const_ceil_v4f32:
; SSE41: ## BB#0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_ceil_v4f32:
; AVX: ## BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,3.000000e+00]
; AVX-NEXT: retq
%t = call <4 x float> @llvm.ceil.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
ret <4 x float> %t
}

define <2 x double> @const_trunc_v2f64() {
; CHECK: const_trunc_v2f64
; CHECK: movaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; SSE41-LABEL: const_trunc_v2f64:
; SSE41: ## BB#0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_trunc_v2f64:
; AVX: ## BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-1.000000e+00,2.000000e+00]
; AVX-NEXT: retq
%t = call <2 x double> @llvm.trunc.v2f64(<2 x double> <double -1.5, double 2.5>)
ret <2 x double> %t
}

define <4 x float> @const_trunc_v4f32() {
; CHECK: const_trunc_v4f32
; CHECK: movaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; SSE41-LABEL: const_trunc_v4f32:
; SSE41: ## BB#0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; SSE41-NEXT: retq
;
; AVX-LABEL: const_trunc_v4f32:
; AVX: ## BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [-3.000000e+00,6.000000e+00,-9.000000e+00,2.000000e+00]
; AVX-NEXT: retq
%t = call <4 x float> @llvm.trunc.v4f32(<4 x float> <float -3.5, float 6.0, float -9.0, float 2.5>)
ret <4 x float> %t
}

@@ -1,25 +1,43 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE2

; FNEG is defined as subtraction from -0.0.

; This test verifies that we use an xor with a constant to flip the sign bits; no subtraction needed.
define <4 x float> @t1(<4 x float> %Q) {
; CHECK-LABEL: t1:
; CHECK: xorps {{.*}}LCPI0_0{{.*}}, %xmm0
; CHECK-NEXT: retq
%tmp = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %Q
ret <4 x float> %tmp
define <4 x float> @t1(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t1:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps .LCPI0_0, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: t1:
; X64-SSE: # BB#0:
; X64-SSE-NEXT: xorps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: retq
%tmp = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %Q
ret <4 x float> %tmp
}
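A bit-level check of the xor idiom above (IEEE-754 single precision; a sketch, not part of the test):

; -0.0f has only the sign bit set: 0x80000000.
; Subtracting a lane from -0.0 flips that lane's sign bit, so
;   bits(-0.0 - x) == bits(x) xor 0x80000000
; e.g. 1.0f = 0x3F800000, and 0x3F800000 xor 0x80000000 = 0xBF800000 = -1.0f.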

; This test verifies that we generate an FP subtraction because "0.0 - x" is not an fneg.
define <4 x float> @t2(<4 x float> %Q) {
; CHECK-LABEL: t2:
; CHECK: xorps %[[X:xmm[0-9]+]], %[[X]]
; CHECK-NEXT: subps %xmm0, %[[X]]
; CHECK-NEXT: movaps %[[X]], %xmm0
; CHECK-NEXT: retq
%tmp = fsub <4 x float> zeroinitializer, %Q
ret <4 x float> %tmp
define <4 x float> @t2(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t2:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm1, %xmm1
; X32-SSE-NEXT: subps %xmm0, %xmm1
; X32-SSE-NEXT: movaps %xmm1, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: t2:
; X64-SSE: # BB#0:
; X64-SSE-NEXT: xorps %xmm1, %xmm1
; X64-SSE-NEXT: subps %xmm0, %xmm1
; X64-SSE-NEXT: movaps %xmm1, %xmm0
; X64-SSE-NEXT: retq
%tmp = fsub <4 x float> zeroinitializer, %Q
ret <4 x float> %tmp
}
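Why the zero case forces a real subtraction (a one-line counterexample):

; With x = +0.0:  fsub 0.0, x = +0.0 (bits 0x00000000)
;                 fneg x      = -0.0 (bits 0x80000000)
; The results differ in the sign bit, so the xor shortcut is not sound here.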

; If we're bitcasting an integer to an FP vector, we should avoid the FPU/vector unit entirely.
@@ -31,14 +49,51 @@ define <4 x float> @t2(<4 x float> %Q) {
; We should generate:
; movabsq (put sign bit mask in integer register)
; xorq (flip sign bits)
; movd (move to xmm return register)
; movd (move to xmm return register)
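The movabsq immediate in the checks below packs one f32 sign mask per lane into a single i64 (arithmetic shown for verification):

; per-lane sign mask: 0x80000000
; two lanes packed:   0x8000000080000000
; as a signed i64:    -9223372034707292160, the movabsq operand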

define <2 x float> @fneg_bitcast(i64 %i) {
; CHECK-LABEL: fneg_bitcast:
; CHECK: movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; CHECK-NEXT: xorq %rdi, %rax
; CHECK-NEXT: movd %rax, %xmm0
; CHECK-NEXT: retq
define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X32-SSE1-LABEL: fneg_bitcast:
; X32-SSE1: # BB#0:
; X32-SSE1-NEXT: pushl %ebp
; X32-SSE1-NEXT: movl %esp, %ebp
; X32-SSE1-NEXT: andl $-16, %esp
; X32-SSE1-NEXT: subl $32, %esp
; X32-SSE1-NEXT: movl $-2147483648, %eax # imm = 0xFFFFFFFF80000000
; X32-SSE1-NEXT: movl 12(%ebp), %ecx
; X32-SSE1-NEXT: xorl %eax, %ecx
; X32-SSE1-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X32-SSE1-NEXT: xorl 8(%ebp), %eax
; X32-SSE1-NEXT: movl %eax, (%esp)
; X32-SSE1-NEXT: movaps (%esp), %xmm0
; X32-SSE1-NEXT: movl %ebp, %esp
; X32-SSE1-NEXT: popl %ebp
; X32-SSE1-NEXT: retl
;
; X32-SSE2-LABEL: fneg_bitcast:
; X32-SSE2: # BB#0:
; X32-SSE2-NEXT: movl $-2147483648, %eax # imm = 0xFFFFFFFF80000000
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: xorl %eax, %ecx
; X32-SSE2-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movd %eax, %xmm1
; X32-SSE2-NEXT: movd %ecx, %xmm0
; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE2-NEXT: retl
;
; X64-SSE1-LABEL: fneg_bitcast:
; X64-SSE1: # BB#0:
; X64-SSE1-NEXT: movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE1-NEXT: xorq %rdi, %rax
; X64-SSE1-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-SSE1-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; X64-SSE1-NEXT: retq
;
; X64-SSE2-LABEL: fneg_bitcast:
; X64-SSE2: # BB#0:
; X64-SSE2-NEXT: movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE2-NEXT: xorq %rdi, %rax
; X64-SSE2-NEXT: movd %rax, %xmm0
; X64-SSE2-NEXT: retq
%bitcast = bitcast i64 %i to <2 x float>
%fneg = fsub <2 x float> <float -0.0, float -0.0>, %bitcast
ret <2 x float> %fneg

@@ -1,24 +1,38 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32-SSE
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX

; PR11674
define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
; SSE-LABEL: fpext_frommem:
; SSE: # BB#0: # %entry
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-NEXT: cvtps2pd (%ecx), %xmm0
; SSE-NEXT: movups %xmm0, (%eax)
; SSE-NEXT: retl
; X32-SSE-LABEL: fpext_frommem:
; X32-SSE: # BB#0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0
; X32-SSE-NEXT: movups %xmm0, (%eax)
; X32-SSE-NEXT: retl
;
; AVX-LABEL: fpext_frommem:
; AVX: # BB#0: # %entry
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX-NEXT: vcvtps2pd (%ecx), %xmm0
; AVX-NEXT: vmovups %xmm0, (%eax)
; AVX-NEXT: retl
; X32-AVX-LABEL: fpext_frommem:
; X32-AVX: # BB#0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtps2pd (%ecx), %xmm0
; X32-AVX-NEXT: vmovups %xmm0, (%eax)
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fpext_frommem:
; X64-SSE: # BB#0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0
; X64-SSE-NEXT: movups %xmm0, (%rsi)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fpext_frommem:
; X64-AVX: # BB#0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %xmm0
; X64-AVX-NEXT: vmovups %xmm0, (%rsi)
; X64-AVX-NEXT: retq
entry:
%0 = load <2 x float>, <2 x float>* %in, align 8
%1 = fpext <2 x float> %0 to <2 x double>
@@ -27,24 +41,39 @@ entry:
}

define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; SSE-LABEL: fpext_frommem4:
; SSE: # BB#0: # %entry
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-NEXT: cvtps2pd (%ecx), %xmm0
; SSE-NEXT: cvtps2pd 8(%ecx), %xmm1
; SSE-NEXT: movups %xmm1, 16(%eax)
; SSE-NEXT: movups %xmm0, (%eax)
; SSE-NEXT: retl
; X32-SSE-LABEL: fpext_frommem4:
; X32-SSE: # BB#0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0
; X32-SSE-NEXT: cvtps2pd 8(%ecx), %xmm1
; X32-SSE-NEXT: movups %xmm1, 16(%eax)
; X32-SSE-NEXT: movups %xmm0, (%eax)
; X32-SSE-NEXT: retl
;
; AVX-LABEL: fpext_frommem4:
; AVX: # BB#0: # %entry
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX-NEXT: vcvtps2pd (%ecx), %ymm0
; AVX-NEXT: vmovups %ymm0, (%eax)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retl
; X32-AVX-LABEL: fpext_frommem4:
; X32-AVX: # BB#0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtps2pd (%ecx), %ymm0
; X32-AVX-NEXT: vmovups %ymm0, (%eax)
; X32-AVX-NEXT: vzeroupper
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fpext_frommem4:
; X64-SSE: # BB#0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0
; X64-SSE-NEXT: cvtps2pd 8(%rdi), %xmm1
; X64-SSE-NEXT: movups %xmm1, 16(%rsi)
; X64-SSE-NEXT: movups %xmm0, (%rsi)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fpext_frommem4:
; X64-AVX: # BB#0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %ymm0
; X64-AVX-NEXT: vmovups %ymm0, (%rsi)
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
entry:
%0 = load <4 x float>, <4 x float>* %in
%1 = fpext <4 x float> %0 to <4 x double>
@@ -53,30 +82,51 @@ entry:
}

define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; SSE-LABEL: fpext_frommem8:
; SSE: # BB#0: # %entry
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-NEXT: cvtps2pd (%ecx), %xmm0
; SSE-NEXT: cvtps2pd 8(%ecx), %xmm1
; SSE-NEXT: cvtps2pd 16(%ecx), %xmm2
; SSE-NEXT: cvtps2pd 24(%ecx), %xmm3
; SSE-NEXT: movups %xmm3, 48(%eax)
; SSE-NEXT: movups %xmm2, 32(%eax)
; SSE-NEXT: movups %xmm1, 16(%eax)
; SSE-NEXT: movups %xmm0, (%eax)
; SSE-NEXT: retl
; X32-SSE-LABEL: fpext_frommem8:
; X32-SSE: # BB#0: # %entry
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT: cvtps2pd (%ecx), %xmm0
; X32-SSE-NEXT: cvtps2pd 8(%ecx), %xmm1
; X32-SSE-NEXT: cvtps2pd 16(%ecx), %xmm2
; X32-SSE-NEXT: cvtps2pd 24(%ecx), %xmm3
; X32-SSE-NEXT: movups %xmm3, 48(%eax)
; X32-SSE-NEXT: movups %xmm2, 32(%eax)
; X32-SSE-NEXT: movups %xmm1, 16(%eax)
; X32-SSE-NEXT: movups %xmm0, (%eax)
; X32-SSE-NEXT: retl
;
; AVX-LABEL: fpext_frommem8:
; AVX: # BB#0: # %entry
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; AVX-NEXT: vcvtps2pd (%ecx), %ymm0
; AVX-NEXT: vcvtps2pd 16(%ecx), %ymm1
; AVX-NEXT: vmovups %ymm1, 32(%eax)
; AVX-NEXT: vmovups %ymm0, (%eax)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retl
; X32-AVX-LABEL: fpext_frommem8:
; X32-AVX: # BB#0: # %entry
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT: vcvtps2pd (%ecx), %ymm0
; X32-AVX-NEXT: vcvtps2pd 16(%ecx), %ymm1
; X32-AVX-NEXT: vmovups %ymm1, 32(%eax)
; X32-AVX-NEXT: vmovups %ymm0, (%eax)
; X32-AVX-NEXT: vzeroupper
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fpext_frommem8:
; X64-SSE: # BB#0: # %entry
; X64-SSE-NEXT: cvtps2pd (%rdi), %xmm0
; X64-SSE-NEXT: cvtps2pd 8(%rdi), %xmm1
; X64-SSE-NEXT: cvtps2pd 16(%rdi), %xmm2
; X64-SSE-NEXT: cvtps2pd 24(%rdi), %xmm3
; X64-SSE-NEXT: movups %xmm3, 48(%rsi)
; X64-SSE-NEXT: movups %xmm2, 32(%rsi)
; X64-SSE-NEXT: movups %xmm1, 16(%rsi)
; X64-SSE-NEXT: movups %xmm0, (%rsi)
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fpext_frommem8:
; X64-AVX: # BB#0: # %entry
; X64-AVX-NEXT: vcvtps2pd (%rdi), %ymm0
; X64-AVX-NEXT: vcvtps2pd 16(%rdi), %ymm1
; X64-AVX-NEXT: vmovups %ymm1, 32(%rsi)
; X64-AVX-NEXT: vmovups %ymm0, (%rsi)
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
entry:
%0 = load <8 x float>, <8 x float>* %in
%1 = fpext <8 x float> %0 to <8 x double>
@@ -85,15 +135,25 @@ entry:
}

define <2 x double> @fpext_fromconst() {
; SSE-LABEL: fpext_fromconst:
; SSE: # BB#0: # %entry
; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; SSE-NEXT: retl
; X32-SSE-LABEL: fpext_fromconst:
; X32-SSE: # BB#0: # %entry
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X32-SSE-NEXT: retl
;
; AVX-LABEL: fpext_fromconst:
; AVX: # BB#0: # %entry
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; AVX-NEXT: retl
; X32-AVX-LABEL: fpext_fromconst:
; X32-AVX: # BB#0: # %entry
; X32-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fpext_fromconst:
; X64-SSE: # BB#0: # %entry
; X64-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: fpext_fromconst:
; X64-AVX: # BB#0: # %entry
; X64-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.000000e+00,-2.000000e+00]
; X64-AVX-NEXT: retq
entry:
%0 = insertelement <2 x float> undef, float 1.0, i32 0
%1 = insertelement <2 x float> %0, float -2.0, i32 1