; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=KNL
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=SKX

define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps %zmm1, %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = fcmp ole <16 x float> %x, %y
%max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %y
ret <16 x float> %max
}

define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
; CHECK-LABEL: test2:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmplepd %zmm1, %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = fcmp ole <8 x double> %x, %y
%max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %y
ret <8 x double> %max
}

define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwind {
; CHECK-LABEL: test3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %yp, align 4
%mask = icmp eq <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
}

define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test4_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp uge <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
ret <16 x i32> %max
}

define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
; CHECK-LABEL: test5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp eq <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
ret <8 x i64> %max
}

define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1) nounwind {
; CHECK-LABEL: test6_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ugt <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x1, <8 x i64> %y
ret <8 x i64> %max
}

define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
; KNL-LABEL: test7:
; KNL: ## BB#0:
; KNL-NEXT: vxorps %xmm2, %xmm2, %xmm2
; KNL-NEXT: vcmpltps %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test7:
; SKX: ## BB#0:
; SKX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; SKX-NEXT: vcmpltps %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = fcmp olt <4 x float> %a, zeroinitializer
%c = select <4 x i1>%mask, <4 x float>%a, <4 x float>%b
ret <4 x float>%c
}

define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
; KNL-LABEL: test8:
; KNL: ## BB#0:
; KNL-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; KNL-NEXT: vcmpltpd %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test8:
; SKX: ## BB#0:
; SKX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; SKX-NEXT: vcmpltpd %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = fcmp olt <2 x double> %a, zeroinitializer
%c = select <2 x i1>%mask, <2 x double>%a, <2 x double>%b
ret <2 x double>%c
}

define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-LABEL: test9:
; KNL: ## BB#0:
; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test9:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; SKX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
ret <8 x i32> %max
}

define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-LABEL: test10:
; KNL: ## BB#0:
; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test10:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqps %ymm1, %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%mask = fcmp oeq <8 x float> %x, %y
%max = select <8 x i1> %mask, <8 x float> %x, <8 x float> %y
ret <8 x float> %max
}

define <8 x i32> @test11_unsigned(<8 x i32> %x, <8 x i32> %y) nounwind {
; CHECK-LABEL: test11_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%mask = icmp ugt <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
ret <8 x i32> %max
}

define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; KNL-LABEL: test12:
; KNL: ## BB#0:
; KNL-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test12:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpeqq %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckbw %k0, %k1, %k0
; SKX-NEXT: kmovw %k0, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <16 x i64> %a, %b
%res1 = bitcast <16 x i1> %res to i16
ret i16 %res1
}

define i32 @test12_v32i32(<32 x i32> %a, <32 x i32> %b) nounwind {
; KNL-LABEL: test12_v32i32:
; KNL: ## BB#0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: movq %rsp, %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $32, %rsp
; KNL-NEXT: vpcmpeqd %zmm3, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: vmovd %ecx, %xmm1
; KNL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $13, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $12, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $11, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $10, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $9, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $8, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $7, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $6, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $5, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $4, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $3, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $2, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
; KNL-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: vmovd %ecx, %xmm0
; KNL-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $13, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $12, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $11, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $10, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $9, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $8, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $7, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $6, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $5, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $4, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $3, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $2, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, (%rsp)
; KNL-NEXT: movl (%rsp), %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
;
; SKX-LABEL: test12_v32i32:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpeqd %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckwd %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <32 x i32> %a, %b
%res1 = bitcast <32 x i1> %res to i32
ret i32 %res1
}

define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
; KNL-LABEL: test12_v64i16:
; KNL: ## BB#0:
; KNL-NEXT: pushq %rbp
; KNL-NEXT: movq %rsp, %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
; KNL-NEXT: vpcmpeqw %ymm5, %ymm1, %ymm1
; KNL-NEXT: vpmovsxwd %ymm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: vmovd %ecx, %xmm1
; KNL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $13, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $12, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $11, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $10, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $9, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $8, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $7, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $6, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $5, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $4, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $3, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $2, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
; KNL-NEXT: vpmovsxbd %xmm1, %zmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
; KNL-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: vmovd %ecx, %xmm0
; KNL-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $13, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $12, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $11, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $10, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $9, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $8, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $7, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $6, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $5, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $4, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $3, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $2, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, (%rsp)
; KNL-NEXT: vpcmpeqw %ymm7, %ymm3, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: vmovd %ecx, %xmm0
; KNL-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $13, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $12, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $11, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $10, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $9, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $8, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $7, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $6, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $5, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $4, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $3, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $2, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
; KNL-NEXT: vpcmpeqw %ymm6, %ymm2, %ymm0
; KNL-NEXT: vpmovsxwd %ymm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: kshiftlw $15, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %ecx
; KNL-NEXT: vmovd %ecx, %xmm0
; KNL-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $13, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $12, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $11, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $10, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $9, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $8, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $7, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $6, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $5, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $4, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $3, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $2, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftlw $1, %k0, %k1
; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, {{[0-9]+}}(%rsp)
; KNL-NEXT: movl (%rsp), %ecx
; KNL-NEXT: movl {{[0-9]+}}(%rsp), %eax
; KNL-NEXT: shlq $32, %rax
; KNL-NEXT: orq %rcx, %rax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
;
; SKX-LABEL: test12_v64i16:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpeqw %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpeqw %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckdq %k0, %k1, %k0
; SKX-NEXT: kmovq %k0, %rax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <64 x i16> %a, %b
%res1 = bitcast <64 x i1> %res to i64
ret i64 %res1
}

define <16 x i32> @test13(<16 x float>%a, <16 x float>%b)
; CHECK-LABEL: test13:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; CHECK-NEXT: retq
{
%cmpvector_i = fcmp oeq <16 x float> %a, %b
%conv = zext <16 x i1> %cmpvector_i to <16 x i32>
ret <16 x i32> %conv
}

define <16 x i32> @test14(<16 x i32>%a, <16 x i32>%b) {
; CHECK-LABEL: test14:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm2
; CHECK-NEXT: vpcmpgtd %zmm0, %zmm2, %k1
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%sub_r = sub <16 x i32> %a, %b
%cmp.i2.i = icmp sgt <16 x i32> %sub_r, %a
%sext.i3.i = sext <16 x i1> %cmp.i2.i to <16 x i32>
%mask = icmp eq <16 x i32> %sext.i3.i, zeroinitializer
%res = select <16 x i1> %mask, <16 x i32> zeroinitializer, <16 x i32> %sub_r
ret <16 x i32>%res
}

define <8 x i64> @test15(<8 x i64>%a, <8 x i64>%b) {
; CHECK-LABEL: test15:
; CHECK: ## BB#0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm2
; CHECK-NEXT: vpcmpgtq %zmm0, %zmm2, %k1
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%sub_r = sub <8 x i64> %a, %b
%cmp.i2.i = icmp sgt <8 x i64> %sub_r, %a
%sext.i3.i = sext <8 x i1> %cmp.i2.i to <8 x i64>
%mask = icmp eq <8 x i64> %sext.i3.i, zeroinitializer
%res = select <8 x i1> %mask, <8 x i64> zeroinitializer, <8 x i64> %sub_r
ret <8 x i64>%res
}

define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test16:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k1
; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp sge <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
ret <16 x i32> %max
}

define <16 x i32> @test17(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test17:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sgt <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
}

define <16 x i32> @test18(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test18:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sle <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
}

define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nounwind {
; CHECK-LABEL: test19:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp ule <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
}

define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test20:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp eq <16 x i32> %x1, %y1
%mask0 = icmp eq <16 x i32> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %y
ret <16 x i32> %max
}

define <8 x i64> @test21(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test21:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i64> %x1, %y1
%mask0 = icmp sle <8 x i64> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
ret <8 x i64> %max
}

define <8 x i64> @test22(<8 x i64> %x, <8 x i64>* %y.ptr, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test22:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sgt <8 x i64> %x1, %y1
%y = load <8 x i64>, <8 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <8 x i64> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
ret <8 x i64> %max
}

define <16 x i32> @test23(<16 x i32> %x, <16 x i32>* %y.ptr, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test23:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask0 = icmp ule <16 x i32> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
}

define <8 x i64> @test24(<8 x i64> %x, <8 x i64> %x1, i64* %yb.ptr) nounwind {
; CHECK-LABEL: test24:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k1
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
%y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
%mask = icmp eq <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
ret <8 x i64> %max
}

define <16 x i32> @test25(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1) nounwind {
; CHECK-LABEL: test25:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi){1to16}, %zmm0, %k1
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
%y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
%mask = icmp sle <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
}

define <16 x i32> @test26(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1, <16 x i32> %y1) nounwind {
; CHECK-LABEL: test26:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
%y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
%mask0 = icmp sgt <16 x i32> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
}

define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y1) nounwind {
; CHECK-LABEL: test27:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleq (%rdi){1to8}, %zmm0, %k1 {%k1}
; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
%y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
%mask0 = icmp sle <8 x i64> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
ret <8 x i64> %max
}

define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1) {
; KNL-LABEL: test28:
; KNL: ## BB#0:
; KNL-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; KNL-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
; KNL-NEXT: kxnorw %k1, %k0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test28:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; SKX-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
; SKX-NEXT: kxnorb %k1, %k0, %k0
; SKX-NEXT: vpmovm2d %k0, %ymm0
; SKX-NEXT: retq
%x_gt_y = icmp sgt <8 x i64> %x, %y
%x1_gt_y1 = icmp sgt <8 x i64> %x1, %y1
%res = icmp eq <8 x i1>%x_gt_y, %x1_gt_y1
%resse = sext <8 x i1>%res to <8 x i32>
ret <8 x i32> %resse
}

define <16 x i8>@test29(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32> %y1) {
; KNL-LABEL: test29:
; KNL: ## BB#0:
; KNL-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; KNL-NEXT: vpcmpgtd %zmm3, %zmm2, %k1
; KNL-NEXT: kxorw %k1, %k0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test29:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; SKX-NEXT: vpcmpgtd %zmm3, %zmm2, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x_gt_y = icmp sgt <16 x i32> %x, %y
%x1_gt_y1 = icmp sgt <16 x i32> %x1, %y1
%res = icmp ne <16 x i1>%x_gt_y, %x1_gt_y1
%resse = sext <16 x i1>%res to <16 x i8>
ret <16 x i8> %resse
}

define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
; KNL-LABEL: test30:
; KNL: ## BB#0:
; KNL-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test30:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%mask = fcmp oeq <4 x double> %x, %y
%max = select <4 x i1> %mask, <4 x double> %x, <4 x double> %y
ret <4 x double> %max
}

define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp) nounwind {
; KNL-LABEL: test31:
; KNL: ## BB#0:
; KNL-NEXT: vcmpltpd (%rdi), %xmm0, %xmm2
; KNL-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test31:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi), %xmm0, %k1
; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%y = load <2 x double>, <2 x double>* %yp, align 4
%mask = fcmp olt <2 x double> %x, %y
%max = select <2 x i1> %mask, <2 x double> %x, <2 x double> %x1
ret <2 x double> %max
}

define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp) nounwind {
; KNL-LABEL: test32:
; KNL: ## BB#0:
; KNL-NEXT: vcmpltpd (%rdi), %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test32:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi), %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%y = load <4 x double>, <4 x double>* %yp, align 4
%mask = fcmp ogt <4 x double> %y, %x
%max = select <4 x i1> %mask, <4 x double> %x, <4 x double> %x1
ret <4 x double> %max
}

define <8 x double> @test33(<8 x double> %x, <8 x double> %x1, <8 x double>* %yp) nounwind {
; CHECK-LABEL: test33:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltpd (%rdi), %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <8 x double>, <8 x double>* %yp, align 4
%mask = fcmp olt <8 x double> %x, %y
%max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %x1
ret <8 x double> %max
}

define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) nounwind {
; KNL-LABEL: test34:
; KNL: ## BB#0:
; KNL-NEXT: vcmpltps (%rdi), %xmm0, %xmm2
; KNL-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test34:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi), %xmm0, %k1
; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%y = load <4 x float>, <4 x float>* %yp, align 4
%mask = fcmp olt <4 x float> %x, %y
%max = select <4 x i1> %mask, <4 x float> %x, <4 x float> %x1
ret <4 x float> %max
}

define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) nounwind {
; KNL-LABEL: test35:
; KNL: ## BB#0:
; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vmovups (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test35:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi), %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%y = load <8 x float>, <8 x float>* %yp, align 4
%mask = fcmp ogt <8 x float> %y, %x
%max = select <8 x i1> %mask, <8 x float> %x, <8 x float> %x1
ret <8 x float> %max
}

define <16 x float> @test36(<16 x float> %x, <16 x float> %x1, <16 x float>* %yp) nounwind {
; CHECK-LABEL: test36:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltps (%rdi), %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%y = load <16 x float>, <16 x float>* %yp, align 4
%mask = fcmp olt <16 x float> %x, %y
%max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %x1
ret <16 x float> %max
}

define <8 x double> @test37(<8 x double> %x, <8 x double> %x1, double* %ptr) nounwind {
; CHECK-LABEL: test37:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1
; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%a = load double, double* %ptr
%v = insertelement <8 x double> undef, double %a, i32 0
%shuffle = shufflevector <8 x double> %v, <8 x double> undef, <8 x i32> zeroinitializer
%mask = fcmp ogt <8 x double> %shuffle, %x
%max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %x1
ret <8 x double> %max
}

define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nounwind {
; KNL-LABEL: test38:
; KNL: ## BB#0:
; KNL-NEXT: vbroadcastsd (%rdi), %ymm2
; KNL-NEXT: vcmpltpd %ymm2, %ymm0, %ymm2
; KNL-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test38:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi){1to4}, %ymm0, %k1
; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%a = load double, double* %ptr
%v = insertelement <4 x double> undef, double %a, i32 0
%shuffle = shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> zeroinitializer
%mask = fcmp ogt <4 x double> %shuffle, %x
%max = select <4 x i1> %mask, <4 x double> %x, <4 x double> %x1
ret <4 x double> %max
}

define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nounwind {
; KNL-LABEL: test39:
; KNL: ## BB#0:
; KNL-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; KNL-NEXT: vcmpltpd %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test39:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi){1to2}, %xmm0, %k1
; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%a = load double, double* %ptr
%v = insertelement <2 x double> undef, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
%mask = fcmp ogt <2 x double> %shuffle, %x
%max = select <2 x i1> %mask, <2 x double> %x, <2 x double> %x1
ret <2 x double> %max
}

define <16 x float> @test40(<16 x float> %x, <16 x float> %x1, float* %ptr) nounwind {
; CHECK-LABEL: test40:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltps (%rdi){1to16}, %zmm0, %k1
; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%a = load float, float* %ptr
%v = insertelement <16 x float> undef, float %a, i32 0
%shuffle = shufflevector <16 x float> %v, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
%mask = fcmp ogt <16 x float> %shuffle, %x
%max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %x1
ret <16 x float> %max
}

define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) nounwind {
; KNL-LABEL: test41:
; KNL: ## BB#0:
; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vbroadcastss (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test41:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi){1to8}, %ymm0, %k1
; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; SKX-NEXT: retq
%a = load float, float* %ptr
%v = insertelement <8 x float> undef, float %a, i32 0
%shuffle = shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
%mask = fcmp ogt <8 x float> %shuffle, %x
%max = select <8 x i1> %mask, <8 x float> %x, <8 x float> %x1
ret <8 x float> %max
}

define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) nounwind {
; KNL-LABEL: test42:
; KNL: ## BB#0:
; KNL-NEXT: vbroadcastss (%rdi), %xmm2
; KNL-NEXT: vcmpltps %xmm2, %xmm0, %xmm2
; KNL-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test42:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi){1to4}, %xmm0, %k1
; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%a = load float, float* %ptr
%v = insertelement <4 x float> undef, float %a, i32 0
%shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
%mask = fcmp ogt <4 x float> %shuffle, %x
%max = select <4 x i1> %mask, <4 x float> %x, <4 x float> %x1
ret <4 x float> %max
}

define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x i1> %mask_in) nounwind {
; KNL-LABEL: test43:
; KNL: ## BB#0:
; KNL-NEXT: vpmovsxwq %xmm2, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm2
; KNL-NEXT: vptestmq %zmm2, %zmm2, %k1
; KNL-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
; KNL-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; KNL-NEXT: retq
;
; SKX-LABEL: test43:
; SKX: ## BB#0:
; SKX-NEXT: vpsllw $15, %xmm2, %xmm2
; SKX-NEXT: vpmovw2m %xmm2, %k1
; SKX-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
; SKX-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
; SKX-NEXT: retq
%a = load double, double* %ptr
%v = insertelement <8 x double> undef, double %a, i32 0
%shuffle = shufflevector <8 x double> %v, <8 x double> undef, <8 x i32> zeroinitializer
%mask_cmp = fcmp ogt <8 x double> %shuffle, %x
%mask = and <8 x i1> %mask_cmp, %mask_in
%max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %x1
ret <8 x double> %max
}

define <4 x i32> @test44(<4 x i16> %x, <4 x i16> %y) #0 {
; KNL-LABEL: test44:
; KNL: ## BB#0:
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; KNL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; KNL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test44:
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SKX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; SKX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: retq
%mask = icmp eq <4 x i16> %x, %y
%1 = sext <4 x i1> %mask to <4 x i32>
ret <4 x i32> %1
}

define <2 x i64> @test45(<2 x i16> %x, <2 x i16> %y) #0 {
; KNL-LABEL: test45:
; KNL: ## BB#0:
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
; KNL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
; KNL-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpsrlq $63, %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test45:
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
; SKX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
; SKX-NEXT: vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
; SKX-NEXT: retq
%mask = icmp eq <2 x i16> %x, %y
%1 = zext <2 x i1> %mask to <2 x i64>
ret <2 x i64> %1
}

define <2 x i64> @test46(<2 x float> %x, <2 x float> %y) #0 {
; KNL-LABEL: test46:
; KNL: ## BB#0:
; KNL-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpmovsxdq %xmm0, %xmm0
; KNL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test46:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqps %xmm1, %xmm0, %k1
; SKX-NEXT: vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
; SKX-NEXT: retq
%mask = fcmp oeq <2 x float> %x, %y
%1 = zext <2 x i1> %mask to <2 x i64>
ret <2 x i64> %1
}