diff --git a/llvm/test/CodeGen/X86/avx-cvttp2si.ll b/llvm/test/CodeGen/X86/avx-cvttp2si.ll
index ab8a0d9a516d..6465aad2a372 100644
--- a/llvm/test/CodeGen/X86/avx-cvttp2si.ll
+++ b/llvm/test/CodeGen/X86/avx-cvttp2si.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx512f,avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
 
-; PR37551 - https://bugs.llvm.org/show_bug.cgi?id=37751
+; PR37751 - https://bugs.llvm.org/show_bug.cgi?id=37751
 ; We can't combine into 'round' instructions because the behavior is different for out-of-range values.
 
 declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>)
diff --git a/llvm/test/CodeGen/X86/sse-cvttp2si.ll b/llvm/test/CodeGen/X86/sse-cvttp2si.ll
index a8b52a78e8cc..bffb9dc09978 100644
--- a/llvm/test/CodeGen/X86/sse-cvttp2si.ll
+++ b/llvm/test/CodeGen/X86/sse-cvttp2si.ll
@@ -3,11 +3,159 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx512f,avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
 
-; PR37551 - https://bugs.llvm.org/show_bug.cgi?id=37751
+; PR37751 - https://bugs.llvm.org/show_bug.cgi?id=37751
 ; We can't combine into 'round' instructions because the behavior is different for out-of-range values.
 
 declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>)
 declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>)
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>)
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>)
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>)
+
+define float @float_to_int_to_float_mem_f32_i32(<4 x float>* %p) {
+; SSE-LABEL: float_to_int_to_float_mem_f32_i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttss2si (%rdi), %eax
+; SSE-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_mem_f32_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttss2si (%rdi), %eax
+; AVX-NEXT:    vcvtsi2ssl %eax, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = load <4 x float>, <4 x float>* %p, align 16
+  %fptosi = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %x)
+  %sitofp = sitofp i32 %fptosi to float
+  ret float %sitofp
+}
+
+define float @float_to_int_to_float_reg_f32_i32(<4 x float> %x) {
+; SSE-LABEL: float_to_int_to_float_reg_f32_i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttss2si %xmm0, %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_reg_f32_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttss2si %xmm0, %eax
+; AVX-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fptosi = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %x)
+  %sitofp = sitofp i32 %fptosi to float
+  ret float %sitofp
+}
+
+define float @float_to_int_to_float_mem_f32_i64(<4 x float>* %p) {
+; SSE-LABEL: float_to_int_to_float_mem_f32_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttss2si (%rdi), %rax
+; SSE-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_mem_f32_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttss2si (%rdi), %rax
+; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = load <4 x float>, <4 x float>* %p, align 16
+  %fptosi = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %x)
+  %sitofp = sitofp i64 %fptosi to float
+  ret float %sitofp
+}
+
+define float @float_to_int_to_float_reg_f32_i64(<4 x float> %x) {
+; SSE-LABEL: float_to_int_to_float_reg_f32_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttss2si %xmm0, %rax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_reg_f32_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttss2si %xmm0, %rax
+; AVX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fptosi = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %x)
+  %sitofp = sitofp i64 %fptosi to float
+  ret float %sitofp
+}
+
+define double @float_to_int_to_float_mem_f64_i32(<2 x double>* %p) {
+; SSE-LABEL: float_to_int_to_float_mem_f64_i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si (%rdi), %eax
+; SSE-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_mem_f64_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si (%rdi), %eax
+; AVX-NEXT:    vcvtsi2sdl %eax, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = load <2 x double>, <2 x double>* %p, align 16
+  %fptosi = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %x)
+  %sitofp = sitofp i32 %fptosi to double
+  ret double %sitofp
+}
+
+define double @float_to_int_to_float_reg_f64_i32(<2 x double> %x) {
+; SSE-LABEL: float_to_int_to_float_reg_f64_i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si %xmm0, %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_reg_f64_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si %xmm0, %eax
+; AVX-NEXT:    vcvtsi2sdl %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fptosi = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %x)
+  %sitofp = sitofp i32 %fptosi to double
+  ret double %sitofp
+}
+
+define double @float_to_int_to_float_mem_f64_i64(<2 x double>* %p) {
+; SSE-LABEL: float_to_int_to_float_mem_f64_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si (%rdi), %rax
+; SSE-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_mem_f64_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si (%rdi), %rax
+; AVX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = load <2 x double>, <2 x double>* %p, align 16
+  %fptosi = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %x)
+  %sitofp = sitofp i64 %fptosi to double
+  ret double %sitofp
+}
+
+define double @float_to_int_to_float_reg_f64_i64(<2 x double> %x) {
+; SSE-LABEL: float_to_int_to_float_reg_f64_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si %xmm0, %rax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_reg_f64_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si %xmm0, %rax
+; AVX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fptosi = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %x)
+  %sitofp = sitofp i64 %fptosi to double
+  ret double %sitofp
+}
 
 define <4 x float> @float_to_int_to_float_mem_v4f32(<4 x float>* %p) {
 ; SSE-LABEL: float_to_int_to_float_mem_v4f32: