From d34b128fbca631ac9f71aaff0a317cdcd3046d81 Mon Sep 17 00:00:00 2001
From: Sanjay Patel <spatel@rotateright.com>
Date: Tue, 9 Aug 2016 21:07:17 +0000
Subject: [PATCH] add test cases for missed vselect optimizations (PR28895)

llvm-svn: 278165
---
 llvm/test/CodeGen/X86/select-with-and-or.ll | 97 +++++++++++++++++++++
 1 file changed, 97 insertions(+)

diff --git a/llvm/test/CodeGen/X86/select-with-and-or.ll b/llvm/test/CodeGen/X86/select-with-and-or.ll
index cfcb546fc16f..7b82b83fcc13 100644
--- a/llvm/test/CodeGen/X86/select-with-and-or.ll
+++ b/llvm/test/CodeGen/X86/select-with-and-or.ll
@@ -84,3 +84,100 @@ define <4 x i32> @test7(<4 x float> %a, <4 x float> %b, <4 x i32>* %p) {
   ret <4 x i32> %r
 }
 
+; FIXME: None of these should use vblendvpd.
+; Repeat all with FP types.
+
+define <2 x double> @test1f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test1f:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    retq
+;
+  %f = fcmp ult <2 x double> %a, %b
+  %r = select <2 x i1> %f, <2 x double> %c, <2 x double> zeroinitializer
+  ret <2 x double> %r
+}
+
+define <2 x double> @test2f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test2f:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    retq
+;
+  %f = fcmp ult <2 x double> %a, %b
+  %r = select <2 x i1> %f, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>, <2 x double> %c
+  ret <2 x double> %r
+}
+
+define <2 x double> @test3f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test3f:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    retq
+;
+  %f = fcmp ult <2 x double> %a, %b
+  %r = select <2 x i1> %f, <2 x double> zeroinitializer, <2 x double> %c
+  ret <2 x double> %r
+}
+
+define <2 x double> @test4f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test4f:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    retq
+;
+  %f = fcmp ult <2 x double> %a, %b
+  %r = select <2 x i1> %f, <2 x double> %c, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>
+  ret <2 x double> %r
+}
+
+define <2 x double> @test5f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test5f:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    retq
+;
+  %f = fcmp ult <2 x double> %a, %b
+  %r = select <2 x i1> %f, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>, <2 x double> zeroinitializer
+  ret <2 x double> %r
+}
+
+define <2 x double> @test6f(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-LABEL: test6f:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    retq
+;
+  %f = fcmp ult <2 x double> %a, %b
+  %r = select <2 x i1> %f, <2 x double> zeroinitializer, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>
+  ret <2 x double> %r
+}
+
+define <2 x double> @test7f(<2 x double> %a, <2 x double> %b, <2 x double>* %p) {
+; CHECK-LABEL: test7f:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vblendvpd %xmm0, (%rdi), %xmm1, %xmm0
+; CHECK-NEXT:    retq
+;
+  %f = fcmp ult <2 x double> %a, %b
+  %l = load <2 x double>, <2 x double>* %p, align 16
+  %r = select <2 x i1> %f, <2 x double> %l, <2 x double> zeroinitializer
+  ret <2 x double> %r
+}
+
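Note (editor sketch, not part of the patch): the FIXME anticipates lowering these selects to plain logic ops on the compare mask, which is presumably what the integer tests earlier in select-with-and-or.ll already check. A select whose false operand is all-zeros is an AND with the mask, and one whose true operand is all-ones is an OR with the mask. Assuming that lowering is extended to FP types, the expected codegen for test1f and test2f would look roughly like the following (the vandpd/vorpd sequences are illustrative, not output from an actual build):

; test1f: select %f, %c, zeroinitializer == (mask & %c)
;   vcmpnlepd %xmm0, %xmm1, %xmm0
;   vandpd    %xmm2, %xmm0, %xmm0
;   retq
;
; test2f: select %f, <all-ones>, %c == (mask | %c)
;   vcmpnlepd %xmm0, %xmm1, %xmm0
;   vorpd     %xmm2, %xmm0, %xmm0
;   retq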