From fe0b973fbfe97d04e796b86455748ff143a5bfef Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sat, 15 Sep 2018 16:23:35 +0000 Subject: [PATCH] [X86] Remove an fp->int->fp domain crossing in LowerUINT_TO_FP_i64. Summary: This unfortunately adds a move, but isn't that better than going to the int domain and back? Reviewers: RKSimon Reviewed By: RKSimon Subscribers: llvm-commits Differential Revision: https://reviews.llvm.org/D52134 llvm-svn: 342327 --- llvm/lib/Target/X86/X86ISelLowering.cpp | 8 +- llvm/test/CodeGen/X86/ftrunc.ll | 23 ++++-- llvm/test/CodeGen/X86/scalar-int-to-fp.ll | 6 +- llvm/test/CodeGen/X86/vec_int_to_fp.ll | 95 +++++++++++++---------- 4 files changed, 77 insertions(+), 55 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 26d21d55ba6a..14880e40881a 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -16737,13 +16737,11 @@ static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG, SDValue Result; if (Subtarget.hasSSE3()) { - // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'. + // FIXME: The 'haddpd' instruction may be slower than 'shuffle + addsd'. Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub); } else { - SDValue S2F = DAG.getBitcast(MVT::v4i32, Sub); - SDValue Shuffle = DAG.getVectorShuffle(MVT::v4i32, dl, S2F, S2F, {2,3,0,1}); - Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, - DAG.getBitcast(MVT::v2f64, Shuffle), Sub); + SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1}); + Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub); } return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll index c0e16746a7e3..d88b11076e5b 100644 --- a/llvm/test/CodeGen/X86/ftrunc.ll +++ b/llvm/test/CodeGen/X86/ftrunc.ll @@ -41,7 +41,8 @@ define double @trunc_unsigned_f64(double %x) #0 { ; SSE2-NEXT: movq %rax, %xmm1 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; SSE2-NEXT: subpd {{.*}}(%rip), %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE2-NEXT: addpd %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -128,12 +129,14 @@ define <2 x double> @trunc_unsigned_v2f64(<2 x double> %x) #0 { ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE2-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] ; SSE2-NEXT: subpd %xmm3, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE2-NEXT: addpd %xmm1, %xmm0 ; SSE2-NEXT: movq %rdx, %xmm1 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE2-NEXT: subpd %xmm3, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSE2-NEXT: movapd %xmm1, %xmm2 +; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1] ; SSE2-NEXT: addpd %xmm1, %xmm2 ; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0] ; SSE2-NEXT: retq @@ -194,23 +197,27 @@ define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 { ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE2-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] ; SSE2-NEXT: subpd %xmm3, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE2-NEXT: addpd %xmm1, %xmm0 ; 
SSE2-NEXT: movq %rdi, %xmm1 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE2-NEXT: subpd %xmm3, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE2-NEXT: movapd %xmm1, %xmm4 +; SSE2-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1] ; SSE2-NEXT: addpd %xmm1, %xmm4 ; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm4[0] ; SSE2-NEXT: movq %rcx, %xmm4 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] ; SSE2-NEXT: subpd %xmm3, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,0,1] +; SSE2-NEXT: movapd %xmm4, %xmm1 +; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1] ; SSE2-NEXT: addpd %xmm4, %xmm1 ; SSE2-NEXT: movq %rax, %xmm4 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] ; SSE2-NEXT: subpd %xmm3, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,0,1] +; SSE2-NEXT: movapd %xmm4, %xmm2 +; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1] ; SSE2-NEXT: addpd %xmm4, %xmm2 ; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: retq @@ -356,7 +363,7 @@ define <4 x double> @trunc_signed_v4f64(<4 x double> %x) #0 { ret <4 x double> %r } -; The fold may be guarded to allow existing code to continue +; The fold may be guarded to allow existing code to continue ; working based on its assumptions of float->int overflow. define float @trunc_unsigned_f32_disable_via_attr(float %x) #1 { diff --git a/llvm/test/CodeGen/X86/scalar-int-to-fp.ll b/llvm/test/CodeGen/X86/scalar-int-to-fp.ll index 4fe77c60c953..0d3836c89f2d 100644 --- a/llvm/test/CodeGen/X86/scalar-int-to-fp.ll +++ b/llvm/test/CodeGen/X86/scalar-int-to-fp.ll @@ -636,7 +636,8 @@ define double @u64_to_d(i64 %a) nounwind { ; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0 -; SSE2_32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2_32-NEXT: movapd %xmm0, %xmm1 +; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] ; SSE2_32-NEXT: addpd %xmm0, %xmm1 ; SSE2_32-NEXT: movlpd %xmm1, (%esp) ; SSE2_32-NEXT: fldl (%esp) @@ -649,7 +650,8 @@ define double @u64_to_d(i64 %a) nounwind { ; SSE2_64-NEXT: movq %rdi, %xmm1 ; SSE2_64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; SSE2_64-NEXT: subpd {{.*}}(%rip), %xmm1 -; SSE2_64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2_64-NEXT: movapd %xmm1, %xmm0 +; SSE2_64-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE2_64-NEXT: addpd %xmm1, %xmm0 ; SSE2_64-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll index 5eda06456f5a..bd3587dba2a2 100644 --- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll +++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll @@ -412,18 +412,21 @@ define <4 x double> @sitofp_16i8_to_4f64(<16 x i8> %a) { define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) { ; SSE-LABEL: uitofp_2i64_to_2f64: ; SSE: # %bb.0: -; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0] -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] -; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] -; SSE-NEXT: subpd %xmm3, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] -; SSE-NEXT: addpd %xmm4, %xmm0 -; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE-NEXT: subpd %xmm3, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] -; SSE-NEXT: addpd %xmm2, %xmm1 -; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: movdqa 
{{.*#+}} xmm2 = [1127219200,1160773632,0,0] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE-NEXT: movapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] +; SSE-NEXT: subpd %xmm4, %xmm0 +; SSE-NEXT: movapd %xmm0, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] +; SSE-NEXT: addpd %xmm0, %xmm1 +; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE-NEXT: subpd %xmm4, %xmm3 +; SSE-NEXT: movapd %xmm3, %xmm0 +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1] +; SSE-NEXT: addpd %xmm3, %xmm0 +; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE-NEXT: movapd %xmm1, %xmm0 ; SSE-NEXT: retq ; ; VEX-LABEL: uitofp_2i64_to_2f64: @@ -691,28 +694,34 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) { define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) { ; SSE-LABEL: uitofp_4i64_to_4f64: ; SSE: # %bb.0: -; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] -; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; SSE-NEXT: movapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] -; SSE-NEXT: subpd %xmm4, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,0,1] -; SSE-NEXT: addpd %xmm5, %xmm0 -; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; SSE-NEXT: subpd %xmm4, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1] -; SSE-NEXT: addpd %xmm3, %xmm5 -; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm5[0] -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] -; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE-NEXT: subpd %xmm4, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,0,1] -; SSE-NEXT: addpd %xmm5, %xmm1 -; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] -; SSE-NEXT: subpd %xmm4, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] -; SSE-NEXT: addpd %xmm3, %xmm2 -; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: movdqa %xmm0, %xmm2 +; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1127219200,1160773632,0,0] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE-NEXT: movapd {{.*#+}} xmm5 = [4.503600e+15,1.934281e+25] +; SSE-NEXT: subpd %xmm5, %xmm2 +; SSE-NEXT: movapd %xmm2, %xmm0 +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] +; SSE-NEXT: addpd %xmm2, %xmm0 +; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE-NEXT: subpd %xmm5, %xmm4 +; SSE-NEXT: movapd %xmm4, %xmm2 +; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1] +; SSE-NEXT: addpd %xmm4, %xmm2 +; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE-NEXT: subpd %xmm5, %xmm1 +; SSE-NEXT: movapd %xmm1, %xmm2 +; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1] +; SSE-NEXT: addpd %xmm1, %xmm2 +; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE-NEXT: subpd %xmm5, %xmm4 +; SSE-NEXT: movapd %xmm4, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1] +; SSE-NEXT: addpd %xmm4, %xmm1 +; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; SSE-NEXT: movapd %xmm2, %xmm1 ; SSE-NEXT: retq ; ; VEX-LABEL: uitofp_4i64_to_4f64: @@ -2797,11 +2806,13 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) { ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE-NEXT: movapd {{.*#+}} xmm4 = 
[4.503600e+15,1.934281e+25] ; SSE-NEXT: subpd %xmm4, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE-NEXT: movapd %xmm1, %xmm0 +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE-NEXT: addpd %xmm1, %xmm0 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] ; SSE-NEXT: subpd %xmm4, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1] +; SSE-NEXT: movapd %xmm3, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1] ; SSE-NEXT: addpd %xmm3, %xmm1 ; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: retq @@ -2963,21 +2974,25 @@ define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) { ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; SSE-NEXT: movapd {{.*#+}} xmm5 = [4.503600e+15,1.934281e+25] ; SSE-NEXT: subpd %xmm5, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE-NEXT: movapd %xmm1, %xmm0 +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] ; SSE-NEXT: addpd %xmm1, %xmm0 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] ; SSE-NEXT: subpd %xmm5, %xmm4 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,0,1] +; SSE-NEXT: movapd %xmm4, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1] ; SSE-NEXT: addpd %xmm4, %xmm1 ; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,0,1] ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE-NEXT: subpd %xmm5, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE-NEXT: movapd %xmm2, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] ; SSE-NEXT: addpd %xmm2, %xmm1 ; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] ; SSE-NEXT: subpd %xmm5, %xmm4 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,0,1] +; SSE-NEXT: movapd %xmm4, %xmm2 +; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1] ; SSE-NEXT: addpd %xmm4, %xmm2 ; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE-NEXT: retq
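
For context, the lowering touched by this patch is the classic SSE2 bit-splicing conversion from u64 to f64, which is what the magic constants in the updated CHECK lines correspond to: 1127219200 (0x43300000) and 1160773632 (0x45300000) are the high dwords of 2^52 and 2^84, and [4.503600e+15,1.934281e+25] is the [2^52, 2^84] bias pair removed by subpd. Below is a minimal scalar C++ sketch of that trick; the function and variable names are illustrative only and are not LLVM identifiers.

  #include <cstdint>
  #include <cstring>

  // Scalar model of the SSE2 u64 -> f64 lowering (illustrative sketch only).
  double u64_to_f64_model(uint64_t x) {
    // Splice the low/high 32-bit halves into the mantissas of doubles whose
    // exponents encode 2^52 and 2^84 (the punpckldq step in the lowering).
    uint64_t lo_bits = 0x4330000000000000ULL | (x & 0xffffffffULL); // 2^52 + lo
    uint64_t hi_bits = 0x4530000000000000ULL | (x >> 32);           // 2^84 + hi*2^32
    double lo, hi;
    std::memcpy(&lo, &lo_bits, sizeof(lo));
    std::memcpy(&hi, &hi_bits, sizeof(hi));
    // Remove the biases; vectorized, this is one subpd against [2^52, 2^84].
    lo -= 0x1p52;   // lo now holds the low 32 bits, exactly
    hi -= 0x1p84;   // hi now holds (high 32 bits) * 2^32, exactly
    // The final horizontal add: haddpd on SSE3, or the unpckhpd + addpd
    // sequence this patch selects on plain SSE2.
    return hi + lo;
  }

The point of the change is the shuffle feeding that final addpd: it now stays in the floating-point domain (unpckhpd) instead of bouncing through the integer domain and back (pshufd on a bitcast v4i32), avoiding a domain-crossing penalty on targets that separate the two execution domains, at the cost of the extra movapd register copy visible in the updated tests.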