; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512

; Bitwise AND of <4 x double> operands; the trailing fadd with +0.0 pins the
; result to the FP domain so llc must select vandpd (not vpand).
define <4 x double> @andpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andpd256:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vandpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x double> %x to <4 x i64>
  %1 = bitcast <4 x double> %y to <4 x i64>
  %and.i = and <4 x i64> %0, %1
  %2 = bitcast <4 x i64> %and.i to <4 x double>
; add forces execution domain
  %3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
  ret <4 x double> %3
}

; AND with a vector constant; expect the constant folded into a memory operand
; of vandpd ({{.*}}(%rip)).
define <4 x double> @andpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: andpd256fold:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vandpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x double> %y to <4 x i64>
  %and.i = and <4 x i64> %0, <i64 4616752568008179712, i64 4614838538166547251, i64 4612361558371493478, i64 4608083138725491507>
  %1 = bitcast <4 x i64> %and.i to <4 x double>
; add forces execution domain
  %2 = fadd <4 x double> %1, <double 0x0, double 0x0, double 0x0, double 0x0>
  ret <4 x double> %2
}

; Bitwise AND of <8 x float> operands; no domain-forcing add needed since the
; float-domain vandps is already selected.
define <8 x float> @andps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andps256:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vandps %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x float> %x to <8 x i32>
  %1 = bitcast <8 x float> %y to <8 x i32>
  %and.i = and <8 x i32> %0, %1
  %2 = bitcast <8 x i32> %and.i to <8 x float>
  ret <8 x float> %2
}

; AND with a splat-ish <8 x i32> constant; expect a folded memory operand.
define <8 x float> @andps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: andps256fold:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x float> %y to <8 x i32>
  %and.i = and <8 x i32> %0, <i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938, i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938>
  %1 = bitcast <8 x i32> %and.i to <8 x float>
  ret <8 x float> %1
}

; Bitwise XOR of <4 x double> operands; fadd with +0.0 forces the FP domain
; so vxorpd is selected.
define <4 x double> @xorpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: xorpd256:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vxorpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x double> %x to <4 x i64>
  %1 = bitcast <4 x double> %y to <4 x i64>
  %xor.i = xor <4 x i64> %0, %1
  %2 = bitcast <4 x i64> %xor.i to <4 x double>
; add forces execution domain
  %3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
  ret <4 x double> %3
}

; XOR with a vector constant; expect the constant folded into vxorpd's memory
; operand.
define <4 x double> @xorpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: xorpd256fold:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vxorpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x double> %y to <4 x i64>
  %xor.i = xor <4 x i64> %0, <i64 4616752568008179712, i64 4614838538166547251, i64 4612361558371493478, i64 4608083138725491507>
  %1 = bitcast <4 x i64> %xor.i to <4 x double>
; add forces execution domain
  %2 = fadd <4 x double> %1, <double 0x0, double 0x0, double 0x0, double 0x0>
  ret <4 x double> %2
}

; Bitwise XOR of <8 x float> operands; vxorps selected directly.
define <8 x float> @xorps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: xorps256:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vxorps %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x float> %x to <8 x i32>
  %1 = bitcast <8 x float> %y to <8 x i32>
  %xor.i = xor <8 x i32> %0, %1
  %2 = bitcast <8 x i32> %xor.i to <8 x float>
  ret <8 x float> %2
}

; XOR with an <8 x i32> constant; expect a folded memory operand.
define <8 x float> @xorps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: xorps256fold:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vxorps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x float> %y to <8 x i32>
  %xor.i = xor <8 x i32> %0, <i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938, i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938>
  %1 = bitcast <8 x i32> %xor.i to <8 x float>
  ret <8 x float> %1
}

; Bitwise OR of <4 x double> operands; fadd with +0.0 forces the FP domain
; so vorpd is selected.
define <4 x double> @orpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: orpd256:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vorpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x double> %x to <4 x i64>
  %1 = bitcast <4 x double> %y to <4 x i64>
  %or.i = or <4 x i64> %0, %1
  %2 = bitcast <4 x i64> %or.i to <4 x double>
; add forces execution domain
  %3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
  ret <4 x double> %3
}

; OR with a vector constant; expect the constant folded into vorpd's memory
; operand.
define <4 x double> @orpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: orpd256fold:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vorpd {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x double> %y to <4 x i64>
  %or.i = or <4 x i64> %0, <i64 4616752568008179712, i64 4614838538166547251, i64 4612361558371493478, i64 4608083138725491507>
  %1 = bitcast <4 x i64> %or.i to <4 x double>
; add forces execution domain
  %2 = fadd <4 x double> %1, <double 0x0, double 0x0, double 0x0, double 0x0>
  ret <4 x double> %2
}

; Bitwise OR of <8 x float> operands; vorps selected directly.
define <8 x float> @orps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: orps256:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vorps %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x float> %x to <8 x i32>
  %1 = bitcast <8 x float> %y to <8 x i32>
  %or.i = or <8 x i32> %0, %1
  %2 = bitcast <8 x i32> %or.i to <8 x float>
  ret <8 x float> %2
}

; OR with an <8 x i32> constant; expect a folded memory operand.
define <8 x float> @orps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; CHECK-LABEL: orps256fold:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vorps {{.*}}(%rip), %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x float> %y to <8 x i32>
  %or.i = or <8 x i32> %0, <i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938, i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938>
  %1 = bitcast <8 x i32> %or.i to <8 x float>
  ret <8 x float> %1
}

; AND-NOT pattern (x & ~y) on <4 x double>; expect a single vandnpd, with the
; fadd forcing the FP domain.
define <4 x double> @andnotpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andnotpd256:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vandnpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x double> %x to <4 x i64>
  %neg.i = xor <4 x i64> %0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %1 = bitcast <4 x double> %y to <4 x i64>
  %and.i = and <4 x i64> %1, %neg.i
  %2 = bitcast <4 x i64> %and.i to <4 x double>
; add forces execution domain
  %3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
  ret <4 x double> %3
}

; AND-NOT where one operand is loaded from memory; expect the load folded into
; vandnpd as (%rdi).
define <4 x double> @andnotpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
; CHECK-LABEL: andnotpd256fold:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vandnpd (%rdi), %ymm0, %ymm0
; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %tmp2 = load <4 x double>, <4 x double>* %x, align 32
  %0 = bitcast <4 x double> %y to <4 x i64>
  %neg.i = xor <4 x i64> %0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %1 = bitcast <4 x double> %tmp2 to <4 x i64>
  %and.i = and <4 x i64> %1, %neg.i
  %2 = bitcast <4 x i64> %and.i to <4 x double>
; add forces execution domain
  %3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
  ret <4 x double> %3
}

; AND-NOT pattern on <8 x float>; expect a single vandnps.
define <8 x float> @andnotps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andnotps256:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vandnps %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <8 x float> %x to <8 x i32>
  %neg.i = xor <8 x i32> %0, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %1 = bitcast <8 x float> %y to <8 x i32>
  %and.i = and <8 x i32> %1, %neg.i
  %2 = bitcast <8 x i32> %and.i to <8 x float>
  ret <8 x float> %2
}

; AND-NOT with a memory operand; expect the load folded into vandnps.
define <8 x float> @andnotps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
; CHECK-LABEL: andnotps256fold:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vandnps (%rdi), %ymm0, %ymm0
; CHECK-NEXT:    retq
entry:
  %tmp2 = load <8 x float>, <8 x float>* %x, align 32
  %0 = bitcast <8 x float> %y to <8 x i32>
  %neg.i = xor <8 x i32> %0, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %1 = bitcast <8 x float> %tmp2 to <8 x i32>
  %and.i = and <8 x i32> %1, %neg.i
  %2 = bitcast <8 x i32> %and.i to <8 x float>
  ret <8 x float> %2
}

;;; Test that basic 2 x i64 logic use the integer version on AVX

; 128-bit i64 AND-NOT: the integer-domain add keeps selection in the integer
; domain, so vpandn (not vandnps) must be used. The +1 splat is materialized
; via vpcmpeqd/vpsubq (all-ones idiom, PR33483).
define <2 x i64> @vpandn(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandn:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
; CHECK-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
entry:
; Force the execution domain with an add.
  %a2 = add <2 x i64> %a, <i64 1, i64 1>
  %y = xor <2 x i64> %a2, <i64 -1, i64 -1>
  %x = and <2 x i64> %a, %y
  ret <2 x i64> %x
}

; 128-bit i64 AND: the integer-domain add forces vpand (not vandps); the +1
; splat uses the vpcmpeqd/vpsubq all-ones idiom (PR33483).
define <2 x i64> @vpand(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpand:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
; CHECK-NEXT:    vpand %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
entry:
; Force the execution domain with an add.
  %a2 = add <2 x i64> %a, <i64 1, i64 1>
  %x = and <2 x i64> %a2, %b
  ret <2 x i64> %x
}

; (x ^ 1) & 1 folds to andn(x, 1); AVX folds the splat-1 constant from memory,
; AVX512 broadcasts it.
define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind {
; AVX-LABEL: and_xor_splat1_v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vandnps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: and_xor_splat1_v4i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; AVX512-NEXT:    vandnps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %xor = xor <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %and = and <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; Same (x ^ 1) & 1 -> andn fold for <4 x i64>, 256-bit version.
define <4 x i64> @and_xor_splat1_v4i64(<4 x i64> %x) nounwind {
; AVX-LABEL: and_xor_splat1_v4i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vandnps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: and_xor_splat1_v4i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; AVX512-NEXT:    vandnps %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    retq
  %xor = xor <4 x i64> %x, <i64 1, i64 1, i64 1, i64 1>
  %and = and <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
  ret <4 x i64> %and
}