; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
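
; This file tests AVX-512 lowering of vector compares feeding selects on KNL:
; compares produce a mask in a k-register and the select becomes a masked
; move or blend.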
define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask = fcmp ole <16 x float> %x, %y
  %max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %y
  ret <16 x float> %max
}
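
; Same pattern as test1 for v8f64: vcmplepd sets %k1 and the select becomes a
; masked vmovapd.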
define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
; CHECK-LABEL: test2:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmplepd %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask = fcmp ole <8 x double> %x, %y
  %max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %y
  ret <8 x double> %max
}
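
; The second compare operand comes from memory, so the load is folded into
; vpcmpeqd.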
define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwind {
; CHECK-LABEL: test3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k1
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %y = load <16 x i32>* %yp, align 4
  %mask = icmp eq <16 x i32> %x, %y
  %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
  ret <16 x i32> %max
}
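
; Unsigned uge is emitted as vpcmpnltud (unsigned not-less-than).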
define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test4_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask = icmp uge <16 x i32> %x, %y
  %max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
  ret <16 x i32> %max
}
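
; v8i64 equality: vpcmpeqq plus a masked vmovdqa64.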
define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
; CHECK-LABEL: test5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask = icmp eq <8 x i64> %x, %y
  %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
  ret <8 x i64> %max
}
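
; Unsigned ugt on v8i64 is emitted as vpcmpnleuq (unsigned not-less-or-equal).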
define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1) nounwind {
; CHECK-LABEL: test6_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask = icmp ugt <8 x i64> %x, %y
  %max = select <8 x i1> %mask, <8 x i64> %x1, <8 x i64> %y
  ret <8 x i64> %max
}
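
; 128-bit vectors take the AVX path: a vector mask from vcmpltps and a
; vblendvps, with no k-registers involved.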
define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: test7:
; CHECK: ## BB#0:
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpltps %xmm2, %xmm0, %xmm2
; CHECK-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
  %mask = fcmp olt <4 x float> %a, zeroinitializer
  %c = select <4 x i1> %mask, <4 x float> %a, <4 x float> %b
  ret <4 x float> %c
}
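
; v2f64 twin of test7: vcmpltpd plus vblendvpd.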
define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: test8:
; CHECK: ## BB#0:
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpltpd %xmm2, %xmm0, %xmm2
; CHECK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
  %mask = fcmp olt <2 x double> %a, zeroinitializer
  %c = select <2 x i1> %mask, <2 x double> %a, <2 x double> %b
  ret <2 x double> %c
}
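
; KNL has no 256-bit masked integer ops, so v8i32 is widened to zmm (note the
; kill comments) and selected with vpblendmd.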
define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; CHECK-LABEL: test9:
; CHECK: ## BB#0:
; CHECK-NEXT: ## kill: YMM1<def> YMM1<kill> ZMM1<def>
; CHECK-NEXT: ## kill: YMM0<def> YMM0<kill> ZMM0<def>
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: ## kill: YMM0<def> YMM0<kill> ZMM0<kill>
; CHECK-NEXT: retq
  %mask = icmp eq <8 x i32> %x, %y
  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
  ret <8 x i32> %max
}
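
; v8f32 is likewise widened to zmm: vcmpeqps plus vblendmps.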
define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; CHECK-LABEL: test10:
; CHECK: ## BB#0:
; CHECK-NEXT: ## kill: YMM1<def> YMM1<kill> ZMM1<def>
; CHECK-NEXT: ## kill: YMM0<def> YMM0<kill> ZMM0<def>
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: ## kill: YMM0<def> YMM0<kill> ZMM0<kill>
; CHECK-NEXT: retq
  %mask = fcmp oeq <8 x float> %x, %y
  %max = select <8 x i1> %mask, <8 x float> %x, <8 x float> %y
  ret <8 x float> %max
}
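
; icmp ugt feeding a select of the same operands is matched as an unsigned
; max: a single vpmaxud.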
define <8 x i32> @test11_unsigned(<8 x i32> %x, <8 x i32> %y) nounwind {
; CHECK-LABEL: test11_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
  %mask = icmp ugt <8 x i32> %x, %y
  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
  ret <8 x i32> %max
}
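
; A v16i64 compare is split into two v8i64 vpcmpeqq masks; kunpckbw
; concatenates them before kmovw reads the i16 result back.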
define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; CHECK-LABEL: test12:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; CHECK-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; CHECK-NEXT: kunpckbw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: AX<def> AX<kill> EAX<kill>
; CHECK-NEXT: retq
  %res = icmp eq <16 x i64> %a, %b
  %res1 = bitcast <16 x i1> %res to i16
  ret i16 %res1
}
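
; zext of a compare mask: the set lanes are filled with 1 via a zero-masked
; vpbroadcastd from a RIP-relative constant.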
define <16 x i32> @test13(<16 x float> %a, <16 x float> %b) {
; CHECK-LABEL: test13:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %cmpvector_i = fcmp oeq <16 x float> %a, %b
  %conv = zext <16 x i1> %cmpvector_i to <16 x i32>
  ret <16 x i32> %conv
}
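
; icmp-eq-zero of a sign-extended compare folds into mask arithmetic; note the
; back-to-back knotw pair ahead of the zero-masked vmovdqu32.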
define <16 x i32> @test14(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test14:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm1
; CHECK-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: knotw %k0, %k1
; CHECK-NEXT: vmovdqu32 %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %sub_r = sub <16 x i32> %a, %b
  %cmp.i2.i = icmp sgt <16 x i32> %sub_r, %a
  %sext.i3.i = sext <16 x i1> %cmp.i2.i to <16 x i32>
  %mask = icmp eq <16 x i32> %sext.i3.i, zeroinitializer
  %res = select <16 x i1> %mask, <16 x i32> zeroinitializer, <16 x i32> %sub_r
  ret <16 x i32> %res
}
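
; v8i64 version of test14: vpcmpgtq, the knotw pair, and a zero-masked
; vmovdqu64.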
define <8 x i64> @test15(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test15:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm1
; CHECK-NEXT: vpcmpgtq %zmm0, %zmm1, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: knotw %k0, %k1
; CHECK-NEXT: vmovdqu64 %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %sub_r = sub <8 x i64> %a, %b
  %cmp.i2.i = icmp sgt <8 x i64> %sub_r, %a
  %sext.i3.i = sext <8 x i1> %cmp.i2.i to <8 x i64>
  %mask = icmp eq <8 x i64> %sext.i3.i, zeroinitializer
  %res = select <8 x i1> %mask, <8 x i64> zeroinitializer, <8 x i64> %sub_r
  ret <8 x i64> %res
}
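
; Signed sge is emitted as vpcmpled with the operands commuted.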
define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test16:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k1
; CHECK-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask = icmp sge <16 x i32> %x, %y
  %max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
  ret <16 x i32> %max
}
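
; test17-test19 fold a memory operand into the signed and unsigned compares
; vpcmpgtd, vpcmpled, and vpcmpleud.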
define <16 x i32> @test17(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test17:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k1
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %y = load <16 x i32>* %y.ptr, align 4
  %mask = icmp sgt <16 x i32> %x, %y
  %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
  ret <16 x i32> %max
}

define <16 x i32> @test18(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test18:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %zmm0, %k1
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %y = load <16 x i32>* %y.ptr, align 4
  %mask = icmp sle <16 x i32> %x, %y
  %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
  ret <16 x i32> %max
}

define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test19:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %y = load <16 x i32>* %y.ptr, align 4
  %mask = icmp ule <16 x i32> %x, %y
  %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
  ret <16 x i32> %max
}
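
; test20-test23 combine two compare masks: the second compare executes under
; {%k1}, so no separate kandw is needed.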
define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test20:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask1 = icmp eq <16 x i32> %x1, %y1
  %mask0 = icmp eq <16 x i32> %x, %y
  %mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
  %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %y
  ret <16 x i32> %max
}

define <8 x i64> @test21(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test21:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovaps %zmm2, %zmm0
; CHECK-NEXT: retq
  %mask1 = icmp sge <8 x i64> %x1, %y1
  %mask0 = icmp sle <8 x i64> %x, %y
  %mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
  %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
  ret <8 x i64> %max
}

define <8 x i64> @test22(<8 x i64> %x, <8 x i64>* %y.ptr, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test22:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask1 = icmp sgt <8 x i64> %x1, %y1
  %y = load <8 x i64>* %y.ptr, align 4
  %mask0 = icmp sgt <8 x i64> %x, %y
  %mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
  %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
  ret <8 x i64> %max
}

define <16 x i32> @test23(<16 x i32> %x, <16 x i32>* %y.ptr, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test23:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask1 = icmp sge <16 x i32> %x1, %y1
  %y = load <16 x i32>* %y.ptr, align 4
  %mask0 = icmp ule <16 x i32> %x, %y
  %mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
  %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
  ret <16 x i32> %max
}
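
; test24-test27 splat a scalar load; the broadcast folds into the compare as a
; {1to8}/{1to16} memory operand.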
define <8 x i64> @test24(<8 x i64> %x, <8 x i64> %x1, i64* %yb.ptr) nounwind {
; CHECK-LABEL: test24:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %yb = load i64* %yb.ptr, align 4
  %y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
  %y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
  %mask = icmp eq <8 x i64> %x, %y
  %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
  ret <8 x i64> %max
}

define <16 x i32> @test25(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test25:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi){1to16}, %zmm0, %k1
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %yb = load i32* %yb.ptr, align 4
  %y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
  %y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
  %mask = icmp sle <16 x i32> %x, %y
  %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
  ret <16 x i32> %max
}

define <16 x i32> @test26(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test26:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k1 {%k1}
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask1 = icmp sge <16 x i32> %x1, %y1
  %yb = load i32* %yb.ptr, align 4
  %y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
  %y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
  %mask0 = icmp sgt <16 x i32> %x, %y
  %mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
  %max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
  ret <16 x i32> %max
}

define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test27:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleq (%rdi){1to8}, %zmm0, %k1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
  %mask1 = icmp sge <8 x i64> %x1, %y1
  %yb = load i64* %yb.ptr, align 4
  %y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
  %y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
  %mask0 = icmp sle <8 x i64> %x, %y
  %mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
  %max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
  ret <8 x i64> %max
}