[X86][SSE] Fix modulo rotation amounts for v8i16/v16i16/v4i32 (PR38243)
llvm-svn: 349047
commit ba91ff4a86 (parent f5f3bef035)
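Note: ISD::ROTL/ISD::ROTR define their rotation amounts modulo the element width, but the vector lowering below fed unmasked amounts into its expansions, so out-of-range amounts miscompiled (PR38243). A minimal scalar sketch of the semantics the patch enforces — rotl16 is an illustrative helper, not LLVM code:

```cpp
#include <cassert>
#include <cstdint>

// Rotation amounts wrap modulo the element width: rotating a 16-bit lane
// by 17 must behave exactly like rotating it by 17 & 15 == 1.
static uint16_t rotl16(uint16_t V, unsigned Amt) {
  Amt &= 15; // the modulo mask this commit introduces
  return Amt ? static_cast<uint16_t>((V << Amt) | (V >> (16 - Amt))) : V;
}

int main() {
  assert(rotl16(0x8001, 17) == rotl16(0x8001, 1));
  return 0;
}
```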
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -24881,8 +24881,6 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
     return SignBitSelect(VT, Amt, M, R);
   }
 
-  // TODO: We need explicit modulo rotation amounts for everything from here on.
-
   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
   bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
                         SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
@@ -24891,6 +24889,7 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
   // Best to fallback for all supported variable shifts.
   // AVX2 - best to fallback for non-constants as well.
   // TODO - legalizers should be able to handle this.
+  // TODO - We need explicit modulo rotation amounts.
   if (LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt) ||
       DAG.isSplatValue(Amt)) {
    SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
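The fallback above builds rotl(X, Amt) as (X << Amt) | (X >> (EltSizeInBits - Amt)). This relies on x86's variable vector shifts (e.g. AVX2 vpsllvd) producing zero for out-of-range amounts, which makes the Amt == 0 case come out right; plain C++ shifts give no such guarantee, so this scalar model of the fallback makes it explicit (helper names are ours, not the patch's):

```cpp
#include <cassert>
#include <cstdint>

// Model x86 variable vector shifts: out-of-range amounts yield zero.
static uint32_t shlSat(uint32_t V, unsigned A) { return A >= 32 ? 0 : V << A; }
static uint32_t srlSat(uint32_t V, unsigned A) { return A >= 32 ? 0 : V >> A; }

static uint32_t rotlFallback(uint32_t X, unsigned Amt) {
  Amt &= 31;                                   // modulo the element width
  return shlSat(X, Amt) | srlSat(X, 32 - Amt); // Amt == 0: srl by 32 gives 0
}

int main() {
  assert(rotlFallback(0xDEADBEEF, 0) == 0xDEADBEEF);
  assert(rotlFallback(0x80000000u, 1) == 1);
  assert(rotlFallback(0xDEADBEEF, 36) == rotlFallback(0xDEADBEEF, 4));
  return 0;
}
```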
@@ -24900,6 +24899,10 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
     return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
   }
 
+  // ISD::ROT* uses modulo rotate amounts.
+  Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
+                    DAG.getConstant(EltSizeInBits - 1, DL, VT));
+
   // As with shifts, convert the rotation amount to a multiplication factor.
   SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
   assert(Scale && "Failed to convert ROTL amount to scale");
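The path below instead converts the (now masked) amount into a multiplier: rotating left by Amt is multiplying by the scale 2^Amt and OR-ing the low and high halves of the double-width product, which is why the masking must happen before convertShiftLeftToScale. A scalar statement of that identity (rotlViaScale is an illustrative helper, not code from the patch):

```cpp
#include <cassert>
#include <cstdint>

static uint32_t rotlViaScale(uint32_t X, unsigned Amt) {
  // X * 2^(Amt mod 32) as a 64-bit product; the high half holds the
  // rotated-out bits, so low | high is the rotation.
  uint64_t Prod = static_cast<uint64_t>(X) << (Amt & 31);
  return static_cast<uint32_t>(Prod) | static_cast<uint32_t>(Prod >> 32);
}

int main() {
  assert(rotlViaScale(0xDEADBEEF, 4) == 0xEADBEEFD);
  assert(rotlViaScale(0xDEADBEEF, 36) == 0xEADBEEFD); // modulo behaviour
  return 0;
}
```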
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -138,6 +138,7 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE2-LABEL: var_rotate_v4i32:
 ; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE2-NEXT:    pslld $23, %xmm1
 ; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
@@ -157,6 +158,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE41-LABEL: var_rotate_v4i32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE41-NEXT:    pslld $23, %xmm1
 ; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE41-NEXT:    cvttps2dq %xmm1, %xmm1
@@ -173,6 +175,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; AVX1-LABEL: var_rotate_v4i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
@@ -230,6 +233,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ;
 ; X32-SSE-LABEL: var_rotate_v4i32:
 ; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
 ; X32-SSE-NEXT:    pslld $23, %xmm1
 ; X32-SSE-NEXT:    paddd {{\.LCPI.*}}, %xmm1
 ; X32-SSE-NEXT:    cvttps2dq %xmm1, %xmm1
@@ -255,6 +259,7 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE2-LABEL: var_rotate_v8i16:
 ; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -281,6 +286,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ;
 ; SSE41-LABEL: var_rotate_v8i16:
 ; SSE41:       # %bb.0:
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
 ; SSE41-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; SSE41-NEXT:    pslld $23, %xmm1
@@ -299,6 +305,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ;
 ; AVX1-LABEL: var_rotate_v8i16:
 ; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
@@ -390,6 +397,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ;
 ; X32-SSE-LABEL: var_rotate_v8i16:
 ; X32-SSE:       # %bb.0:
+; X32-SSE-NEXT:    pand {{\.LCPI.*}}, %xmm1
 ; X32-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
 ; X32-SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
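In the checks above, the new pand is the modulo mask, and the pre-existing pslld $23 / paddd / cvttps2dq triple is how SSE materializes 2^Amt per lane without a variable shift: shifting Amt into the IEEE-754 exponent field and adding 1065353216 (0x3F800000, the bit pattern of 1.0f) produces the float 2^Amt, which cvttps2dq truncates back to an integer scale. A scalar sketch, assuming Amt <= 30 so the truncation stays in range (pow2ViaFloat is our name, not the test's):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

static int32_t pow2ViaFloat(uint32_t Amt) {
  uint32_t Bits = (Amt << 23) + 0x3F800000u; // pslld $23, then paddd bias
  float F;
  std::memcpy(&F, &Bits, sizeof(F));         // lanes reinterpreted as floats
  return static_cast<int32_t>(F);            // cvttps2dq: truncate to int
}

int main() {
  assert(pow2ViaFloat(0) == 1);
  assert(pow2ViaFloat(5) == 32);
  assert(pow2ViaFloat(30) == 1 << 30);
  return 0;
}
```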
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -104,22 +104,25 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
 ; AVX1-LABEL: var_rotate_v8i32:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [31,31,31,31]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
 ; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
-; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpmuludq %xmm2, %xmm5, %xmm2
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,0,2,2]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
-; AVX1-NEXT:    vpor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm6, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,2,2]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
+; AVX1-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
@@ -196,28 +199,31 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; AVX1-LABEL: var_rotate_v16i16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX1-NEXT:    vpslld $23, %xmm4, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT:    vpaddd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vcvttps2dq %xmm4, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX1-NEXT:    vpslld $23, %xmm5, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT:    vpaddd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vcvttps2dq %xmm5, %xmm5
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
 ; AVX1-NEXT:    vpslld $23, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm6, %xmm2, %xmm2
 ; AVX1-NEXT:    vcvttps2dq %xmm2, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpmulhuw %xmm2, %xmm4, %xmm6
-; AVX1-NEXT:    vpmullw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX1-NEXT:    vpackusdw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpmulhuw %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vpmullw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpor %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; AVX1-NEXT:    vpslld $23, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddd %xmm6, %xmm3, %xmm3
 ; AVX1-NEXT:    vcvttps2dq %xmm3, %xmm3
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
 ; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm6, %xmm1, %xmm1
 ; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm3
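For the 16-bit lanes above, the product identity maps directly onto an instruction pair: vpmullw keeps the low 16 bits of each 16x16 product and vpmulhuw the high 16 bits, so OR-ing the two results rotates every lane at once. A scalar model of one lane (mullw/mulhuw are illustrative names for the instruction semantics, not code from the test):

```cpp
#include <cassert>
#include <cstdint>

static uint16_t mullw(uint16_t V, uint16_t S) {  // low half, like pmullw
  return static_cast<uint16_t>(V * S);
}
static uint16_t mulhuw(uint16_t V, uint16_t S) { // high half, like pmulhuw
  return static_cast<uint16_t>((static_cast<uint32_t>(V) * S) >> 16);
}

int main() {
  const uint16_t V = 0xABCD, Scale = 1 << 4;     // rotate left by 4
  assert((mullw(V, Scale) | mulhuw(V, Scale)) == 0xBCDA);
  return 0;
}
```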