[TargetLowering] Use ISD::FSHR in expandFixedPointMul

Replace OR(SHL,SRL) pattern with ISD::FSHR (legalization expands this later if necessary) - this helps with the scale == 0 'undefined' drop-through case that was discussed on D55720.

llvm-svn: 353546
This commit is contained in:
Simon Pilgrim 2019-02-08 18:57:38 +00:00
parent 01486b22bb
commit eb6a47a462
2 changed files with 8 additions and 11 deletions

View File

@@ -5512,9 +5512,6 @@ TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
   // are scaled. The result is given to us in 2 halves, so we only want part of
   // both in the result.
   EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
-  Lo = DAG.getNode(ISD::SRL, dl, VT, Lo, DAG.getConstant(Scale, dl, ShiftTy));
-  Hi = DAG.getNode(
-      ISD::SHL, dl, VT, Hi,
-      DAG.getConstant(VT.getScalarSizeInBits() - Scale, dl, ShiftTy));
-  return DAG.getNode(ISD::OR, dl, VT, Lo, Hi);
+  return DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo,
+                     DAG.getConstant(Scale, dl, ShiftTy));
 }

View File

@@ -104,16 +104,16 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64: # %bb.0:
 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X64-NEXT: pmuludq %xmm1, %xmm0
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; X64-NEXT: pmuludq %xmm2, %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
 ; X64-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X64-NEXT: pslld $30, %xmm3
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-NEXT: psrld $2, %xmm3
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
 ; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT: psrld $2, %xmm0
+; X64-NEXT: pslld $30, %xmm0
 ; X64-NEXT: por %xmm3, %xmm0
 ; X64-NEXT: retq
 ;