; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=ANY,AVX1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=ANY,INT256,AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f | FileCheck %s --check-prefixes=ANY,INT256,AVX512
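; Prefix summary (from the RUN lines above): ANY = checks common to all three runs; AVX1 = the
; +avx run only; INT256 = the +avx2 and +avx512f runs (256-bit integer ops available); AVX2 and
; AVX512 = those runs individually.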

define <4 x double> @andpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; ANY-LABEL: andpd256:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vandpd %ymm0, %ymm1, %ymm0
; ANY-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <4 x double> %x to <4 x i64>
%1 = bitcast <4 x double> %y to <4 x i64>
%and.i = and <4 x i64> %0, %1
%2 = bitcast <4 x i64> %and.i to <4 x double>
; add forces execution domain
%3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
ret <4 x double> %3
}

define <4 x double> @andpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; ANY-LABEL: andpd256fold:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vandpd {{.*}}(%rip), %ymm0, %ymm0
; ANY-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <4 x double> %y to <4 x i64>
%and.i = and <4 x i64> %0, <i64 4616752568008179712, i64 4614838538166547251, i64 4612361558371493478, i64 4608083138725491507>
%1 = bitcast <4 x i64> %and.i to <4 x double>
; add forces execution domain
%2 = fadd <4 x double> %1, <double 0x0, double 0x0, double 0x0, double 0x0>
ret <4 x double> %2
}

define <8 x float> @andps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; ANY-LABEL: andps256:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vandps %ymm0, %ymm1, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <8 x float> %x to <8 x i32>
%1 = bitcast <8 x float> %y to <8 x i32>
%and.i = and <8 x i32> %0, %1
%2 = bitcast <8 x i32> %and.i to <8 x float>
ret <8 x float> %2
}

define <8 x float> @andps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; ANY-LABEL: andps256fold:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <8 x float> %y to <8 x i32>
%and.i = and <8 x i32> %0, <i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938, i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938>
%1 = bitcast <8 x i32> %and.i to <8 x float>
ret <8 x float> %1
}

define <4 x double> @xorpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; ANY-LABEL: xorpd256:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vxorpd %ymm0, %ymm1, %ymm0
; ANY-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <4 x double> %x to <4 x i64>
%1 = bitcast <4 x double> %y to <4 x i64>
%xor.i = xor <4 x i64> %0, %1
%2 = bitcast <4 x i64> %xor.i to <4 x double>
; add forces execution domain
%3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
ret <4 x double> %3
}

define <4 x double> @xorpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; ANY-LABEL: xorpd256fold:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vxorpd {{.*}}(%rip), %ymm0, %ymm0
; ANY-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <4 x double> %y to <4 x i64>
%xor.i = xor <4 x i64> %0, <i64 4616752568008179712, i64 4614838538166547251, i64 4612361558371493478, i64 4608083138725491507>
%1 = bitcast <4 x i64> %xor.i to <4 x double>
; add forces execution domain
%2 = fadd <4 x double> %1, <double 0x0, double 0x0, double 0x0, double 0x0>
ret <4 x double> %2
}

define <8 x float> @xorps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; ANY-LABEL: xorps256:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vxorps %ymm0, %ymm1, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <8 x float> %x to <8 x i32>
%1 = bitcast <8 x float> %y to <8 x i32>
%xor.i = xor <8 x i32> %0, %1
%2 = bitcast <8 x i32> %xor.i to <8 x float>
ret <8 x float> %2
}

define <8 x float> @xorps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; ANY-LABEL: xorps256fold:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <8 x float> %y to <8 x i32>
%xor.i = xor <8 x i32> %0, <i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938, i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938>
%1 = bitcast <8 x i32> %xor.i to <8 x float>
ret <8 x float> %1
}

define <4 x double> @orpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; ANY-LABEL: orpd256:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vorpd %ymm0, %ymm1, %ymm0
; ANY-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <4 x double> %x to <4 x i64>
%1 = bitcast <4 x double> %y to <4 x i64>
%or.i = or <4 x i64> %0, %1
%2 = bitcast <4 x i64> %or.i to <4 x double>
; add forces execution domain
%3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
ret <4 x double> %3
}

define <4 x double> @orpd256fold(<4 x double> %y) nounwind uwtable readnone ssp {
; ANY-LABEL: orpd256fold:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vorpd {{.*}}(%rip), %ymm0, %ymm0
; ANY-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <4 x double> %y to <4 x i64>
%or.i = or <4 x i64> %0, <i64 4616752568008179712, i64 4614838538166547251, i64 4612361558371493478, i64 4608083138725491507>
%1 = bitcast <4 x i64> %or.i to <4 x double>
; add forces execution domain
%2 = fadd <4 x double> %1, <double 0x0, double 0x0, double 0x0, double 0x0>
ret <4 x double> %2
}

define <8 x float> @orps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; ANY-LABEL: orps256:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vorps %ymm0, %ymm1, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <8 x float> %x to <8 x i32>
%1 = bitcast <8 x float> %y to <8 x i32>
%or.i = or <8 x i32> %0, %1
%2 = bitcast <8 x i32> %or.i to <8 x float>
ret <8 x float> %2
}

define <8 x float> @orps256fold(<8 x float> %y) nounwind uwtable readnone ssp {
; ANY-LABEL: orps256fold:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <8 x float> %y to <8 x i32>
%or.i = or <8 x i32> %0, <i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938, i32 1083179008, i32 1079613850, i32 1075000115, i32 1067030938>
%1 = bitcast <8 x i32> %or.i to <8 x float>
ret <8 x float> %1
}

define <4 x double> @andnotpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; ANY-LABEL: andnotpd256:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vandnpd %ymm0, %ymm1, %ymm0
; ANY-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <4 x double> %x to <4 x i64>
%neg.i = xor <4 x i64> %0, <i64 -1, i64 -1, i64 -1, i64 -1>
%1 = bitcast <4 x double> %y to <4 x i64>
%and.i = and <4 x i64> %1, %neg.i
%2 = bitcast <4 x i64> %and.i to <4 x double>
; add forces execution domain
%3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
ret <4 x double> %3
}

define <4 x double> @andnotpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
; ANY-LABEL: andnotpd256fold:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vandnpd (%rdi), %ymm0, %ymm0
; ANY-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%tmp2 = load <4 x double>, <4 x double>* %x, align 32
%0 = bitcast <4 x double> %y to <4 x i64>
%neg.i = xor <4 x i64> %0, <i64 -1, i64 -1, i64 -1, i64 -1>
%1 = bitcast <4 x double> %tmp2 to <4 x i64>
%and.i = and <4 x i64> %1, %neg.i
%2 = bitcast <4 x i64> %and.i to <4 x double>
; add forces execution domain
%3 = fadd <4 x double> %2, <double 0x0, double 0x0, double 0x0, double 0x0>
ret <4 x double> %3
}

define <8 x float> @andnotps256(<8 x float> %y, <8 x float> %x) nounwind uwtable readnone ssp {
; ANY-LABEL: andnotps256:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vandnps %ymm0, %ymm1, %ymm0
; ANY-NEXT: retq
entry:
%0 = bitcast <8 x float> %x to <8 x i32>
%neg.i = xor <8 x i32> %0, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%1 = bitcast <8 x float> %y to <8 x i32>
%and.i = and <8 x i32> %1, %neg.i
%2 = bitcast <8 x i32> %and.i to <8 x float>
ret <8 x float> %2
}

define <8 x float> @andnotps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
; ANY-LABEL: andnotps256fold:
; ANY: # %bb.0: # %entry
; ANY-NEXT: vandnps (%rdi), %ymm0, %ymm0
; ANY-NEXT: retq
entry:
%tmp2 = load <8 x float>, <8 x float>* %x, align 32
%0 = bitcast <8 x float> %y to <8 x i32>
%neg.i = xor <8 x i32> %0, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%1 = bitcast <8 x float> %tmp2 to <8 x i32>
%and.i = and <8 x i32> %1, %neg.i
%2 = bitcast <8 x i32> %and.i to <8 x float>
ret <8 x float> %2
}

;;; Test that basic 2 x i64 logic ops use the integer version on AVX

define <2 x i64> @vpandn(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; Force the execution domain with an add.
; ANY-LABEL: vpandn:
; ANY: # %bb.0:
; ANY-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; ANY-NEXT: vpsubq %xmm1, %xmm0, %xmm1
; ANY-NEXT: vpandn %xmm0, %xmm1, %xmm0
; ANY-NEXT: retq
%a2 = add <2 x i64> %a, <i64 1, i64 1>
%y = xor <2 x i64> %a2, <i64 -1, i64 -1>
%x = and <2 x i64> %a, %y
ret <2 x i64> %x
}

define <2 x i64> @vpand(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; Force the execution domain with an add.
; ANY-LABEL: vpand:
; ANY: # %bb.0:
; ANY-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; ANY-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; ANY-NEXT: vpand %xmm1, %xmm0, %xmm0
; ANY-NEXT: retq
%a2 = add <2 x i64> %a, <i64 1, i64 1>
%x = and <2 x i64> %a2, %b
ret <2 x i64> %x
}

define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind {
; AVX1-LABEL: and_xor_splat1_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vandnps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: retq
;
; INT256-LABEL: and_xor_splat1_v4i32:
; INT256: # %bb.0:
; INT256-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; INT256-NEXT: vandnps %xmm1, %xmm0, %xmm0
; INT256-NEXT: retq
%xor = xor <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
%and = and <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %and
}

define <4 x i64> @and_xor_splat1_v4i64(<4 x i64> %x) nounwind {
; AVX1-LABEL: and_xor_splat1_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: and_xor_splat1_v4i64:
; INT256: # %bb.0:
; INT256-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; INT256-NEXT: vandnps %ymm1, %ymm0, %ymm0
; INT256-NEXT: retq
%xor = xor <4 x i64> %x, <i64 1, i64 1, i64 1, i64 1>
%and = and <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
ret <4 x i64> %and
}

; PR37749 - https://bugs.llvm.org/show_bug.cgi?id=37749
; For AVX1, we don't want a 256-bit logic op with insert/extract to the surrounding 128-bit ops.
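; AVX1 has 256-bit floating-point logic ops but no 256-bit integer add, so keeping the mask op at
; 256 bits here would force vextractf128/vinsertf128 around the 128-bit vpaddd halves (see the
; AVX1 check lines below); splitting the logic op avoids that round trip.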

define <8 x i32> @and_disguised_i8_elts(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: and_disguised_i8_elts:
; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: and_disguised_i8_elts:
; INT256: # %bb.0:
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; INT256-NEXT: retq
%a = add <8 x i32> %x, %y
%l = and <8 x i32> %a, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
%t = add <8 x i32> %l, %z
ret <8 x i32> %t
}

define <8 x i32> @andn_disguised_i8_elts(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: andn_disguised_i8_elts:
; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
; AVX1-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpandn %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: andn_disguised_i8_elts:
; INT256: # %bb.0:
; INT256-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; INT256-NEXT: vpandn {{.*}}(%rip), %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; INT256-NEXT: retq
%add = add <8 x i32> %y, %x
%neg = and <8 x i32> %add, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
%and = xor <8 x i32> %neg, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
%add1 = add <8 x i32> %and, %z
ret <8 x i32> %add1
}

; Negative test - if we don't have a leading concat_vectors, the transform won't be profitable.
define <8 x i32> @andn_variable_mask_operand_no_concat(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: andn_variable_mask_operand_no_concat:
; AVX1: # %bb.0:
; AVX1-NEXT: vandnps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: andn_variable_mask_operand_no_concat:
; INT256: # %bb.0:
; INT256-NEXT: vpandn %ymm2, %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: retq
%and = and <8 x i32> %x, %z
%xor = xor <8 x i32> %and, %z ; demanded bits will make this a 'not'
%add = add <8 x i32> %xor, %y
ret <8 x i32> %add
}

; Negative test - if we don't have a leading concat_vectors, the transform won't be profitable (even if the mask is a constant).
define <8 x i32> @andn_constant_mask_operand_no_concat(<8 x i32> %x, <8 x i32> %y) {
; AVX1-LABEL: andn_constant_mask_operand_no_concat:
; AVX1: # %bb.0:
; AVX1-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: andn_constant_mask_operand_no_concat:
; INT256: # %bb.0:
; INT256-NEXT: vpandn {{.*}}(%rip), %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: retq
%xor = xor <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%and = and <8 x i32> %xor, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
%r = add <8 x i32> %and, %y
ret <8 x i32> %r
}

; This is a close call, but we split the 'andn' to reduce the insert/extract.
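; Here the leading 128-bit adds already leave the value split into xmm halves, so doing the andn
; per half (two vpandn, see the AVX1 checks below) avoids reassembling a 256-bit vector just to
; feed a single vandnps.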

define <8 x i32> @andn_variable_mask_operand_concat(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z, <8 x i32> %w) {
; AVX1-LABEL: andn_variable_mask_operand_concat:
; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX1-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpandn %xmm2, %xmm4, %xmm1
; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: andn_variable_mask_operand_concat:
; INT256: # %bb.0:
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpandn %ymm2, %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm3, %ymm0, %ymm0
; INT256-NEXT: retq
%add = add <8 x i32> %x, %y
%xor = xor <8 x i32> %add, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%and = and <8 x i32> %xor, %z
%r = add <8 x i32> %and, %w
ret <8 x i32> %r
}

define <8 x i32> @or_disguised_i8_elts(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: or_disguised_i8_elts:
; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: or_disguised_i8_elts:
; INT256: # %bb.0:
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
; INT256-NEXT: vpor %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; INT256-NEXT: retq
%a = add <8 x i32> %x, %y
%l = or <8 x i32> %a, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
%t = add <8 x i32> %l, %z
ret <8 x i32> %t
}

define <8 x i32> @xor_disguised_i8_elts(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: xor_disguised_i8_elts:
; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: xor_disguised_i8_elts:
; INT256: # %bb.0:
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
; INT256-NEXT: vpxor %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; INT256-NEXT: retq
%a = add <8 x i32> %x, %y
%l = xor <8 x i32> %a, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
%t = add <8 x i32> %l, %z
ret <8 x i32> %t
}

define <8 x i32> @and_disguised_i16_elts(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: and_disguised_i16_elts:
; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: and_disguised_i16_elts:
; INT256: # %bb.0:
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpxor %xmm1, %xmm1, %xmm1
; INT256-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; INT256-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; INT256-NEXT: retq
%a = add <8 x i32> %x, %y
%l = and <8 x i32> %a, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
%t = add <8 x i32> %l, %z
ret <8 x i32> %t
}

define <8 x i32> @or_disguised_i16_elts(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: or_disguised_i16_elts:
; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: or_disguised_i16_elts:
; INT256: # %bb.0:
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
; INT256-NEXT: vpor %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; INT256-NEXT: retq
%a = add <8 x i32> %x, %y
%l = or <8 x i32> %a, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
%t = add <8 x i32> %l, %z
ret <8 x i32> %t
}

define <8 x i32> @xor_disguised_i16_elts(<8 x i32> %x, <8 x i32> %y, <8 x i32> %z) {
; AVX1-LABEL: xor_disguised_i16_elts:
; AVX1: # %bb.0:
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: xor_disguised_i16_elts:
; INT256: # %bb.0:
; INT256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
; INT256-NEXT: vpxor %ymm1, %ymm0, %ymm0
; INT256-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; INT256-NEXT: retq
%a = add <8 x i32> %x, %y
%l = xor <8 x i32> %a, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
%t = add <8 x i32> %l, %z
ret <8 x i32> %t
}