[SelectionDAGBuilder] Enable funnel shift building to custom rotates
This patch enables funnel shift -> rotate building for all ROTL/ROTR custom/legal operations. AFAICT X86 was the last target that was missing modulo support (PR38243), but I've tried to CC stakeholders for every target that has ROTL/ROTR custom handling for their final OK.

Differential Revision: https://reviews.llvm.org/D55747

llvm-svn: 349765
commit b208255fe0
parent eb3a64a4da
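The transform in IR terms: a funnel shift whose two value operands are the same is exactly a rotate, because the fshl/fshr intrinsics already take their shift amount modulo the bit width. A minimal scalar sketch of the pattern (mirroring the rotl_i32 and rotl_v4i32 tests below):

declare i32 @llvm.fshl.i32(i32, i32, i32)

; fshl(x, x, z) == rotl(x, z). The generic funnel-shift expansion needs a
; select to filter out the zero-shift case (where the complementary shift
; amount would equal the bit width); a hardware rotate has no such problem,
; so this can now map straight to a target ROTL node whenever ROTL is legal
; *or custom*.
define i32 @rotl_i32(i32 %x, i32 %z) {
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}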
@@ -5762,17 +5762,15 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
     // avoid the select that is necessary in the general case to filter out
     // the 0-shift possibility that leads to UB.
     if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
-      // TODO: This should also be done if the operation is custom, but we have
-      // to make sure targets are handling the modulo shift amount as expected.
       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
-      if (TLI.isOperationLegal(RotateOpcode, VT)) {
+      if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
         return nullptr;
       }
 
       // Some targets only rotate one way. Try the opposite direction.
       RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
-      if (TLI.isOperationLegal(RotateOpcode, VT)) {
+      if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
         // Negate the shift amount because it is safe to ignore the high bits.
         SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));

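The opposite-direction fallback above relies on the same modulo behavior: for a power-of-two bit width w, the wrapping negation -z is congruent to w - z (mod w), so a right rotate by z equals a left rotate by the negated amount and the high bits of the subtraction can safely be ignored. A small IR sketch of that identity (the function name is hypothetical, not part of the patch):

declare i32 @llvm.fshl.i32(i32, i32, i32)

; rotr(x, z) == rotl(x, 0 - z) for i32: both directions reduce the amount
; modulo 32, and since 32 is a power of two, wrapping negation in i32 is
; congruent to 32 - z (mod 32). This is the NegShAmt trick used when only
; the opposite rotate direction is legal or custom.
define i32 @rotr_via_rotl(i32 %x, i32 %z) {
  %neg = sub i32 0, %z
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %neg)  ; == fshr(x, x, z)
  ret i32 %f
}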
@@ -96,51 +96,32 @@ define i32 @rotl_i32(i32 %x, i32 %z) nounwind {
 define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
 ; X32-SSE2-LABEL: rotl_v4i32:
 ; X32-SSE2:       # %bb.0:
-; X32-SSE2-NEXT:    pxor %xmm3, %xmm3
-; X32-SSE2-NEXT:    psubd %xmm1, %xmm3
-; X32-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [31,31,31,31]
-; X32-SSE2-NEXT:    pand %xmm4, %xmm3
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE2-NEXT:    psrld %xmm2, %xmm5
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm3[0,1,1,1,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm2
-; X32-SSE2-NEXT:    psrld %xmm6, %xmm2
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm3[2,3,3,3,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm6
-; X32-SSE2-NEXT:    psrld %xmm5, %xmm6
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,1,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE2-NEXT:    psrld %xmm3, %xmm5
-; X32-SSE2-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1]
-; X32-SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm5[0,3]
-; X32-SSE2-NEXT:    pand %xmm4, %xmm1
+; X32-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm1
 ; X32-SSE2-NEXT:    pslld $23, %xmm1
 ; X32-SSE2-NEXT:    paddd {{\.LCPI.*}}, %xmm1
 ; X32-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X32-SSE2-NEXT:    pmuludq %xmm1, %xmm0
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; X32-SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT:    orps %xmm0, %xmm2
-; X32-SSE2-NEXT:    movaps %xmm2, %xmm0
+; X32-SSE2-NEXT:    por %xmm3, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: rotl_v4i32:
 ; X64-AVX2:       # %bb.0:
 ; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; X64-AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
-; X64-AVX2-NEXT:    vpsllvd %xmm3, %xmm0, %xmm3
-; X64-AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm4, %xmm1
 ; X64-AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
 ; X64-AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; X64-AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; X64-AVX2-NEXT:    retq
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f

@@ -273,52 +254,36 @@ define i64 @rotr_i64(i64 %x, i64 %z) nounwind {
 define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
 ; X32-SSE2-LABEL: rotr_v4i32:
 ; X32-SSE2:       # %bb.0:
-; X32-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [31,31,31,31]
-; X32-SSE2-NEXT:    pxor %xmm3, %xmm3
-; X32-SSE2-NEXT:    psubd %xmm1, %xmm3
-; X32-SSE2-NEXT:    movdqa %xmm1, %xmm4
-; X32-SSE2-NEXT:    pand %xmm2, %xmm4
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm4[2,3,3,3,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE2-NEXT:    psrld %xmm1, %xmm5
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm6 = xmm4[0,1,1,1,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm1
-; X32-SSE2-NEXT:    psrld %xmm6, %xmm1
-; X32-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm5 = xmm4[2,3,3,3,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm6
-; X32-SSE2-NEXT:    psrld %xmm5, %xmm6
-; X32-SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,1,1,1,4,5,6,7]
-; X32-SSE2-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE2-NEXT:    psrld %xmm4, %xmm5
-; X32-SSE2-NEXT:    punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm6[1]
-; X32-SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,3],xmm5[0,3]
-; X32-SSE2-NEXT:    pand %xmm2, %xmm3
-; X32-SSE2-NEXT:    pslld $23, %xmm3
-; X32-SSE2-NEXT:    paddd {{\.LCPI.*}}, %xmm3
-; X32-SSE2-NEXT:    cvttps2dq %xmm3, %xmm2
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; X32-SSE2-NEXT:    pmuludq %xmm2, %xmm0
+; X32-SSE2-NEXT:    pxor %xmm2, %xmm2
+; X32-SSE2-NEXT:    psubd %xmm1, %xmm2
+; X32-SSE2-NEXT:    pand {{\.LCPI.*}}, %xmm2
+; X32-SSE2-NEXT:    pslld $23, %xmm2
+; X32-SSE2-NEXT:    paddd {{\.LCPI.*}}, %xmm2
+; X32-SSE2-NEXT:    cvttps2dq %xmm2, %xmm1
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
 ; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; X32-SSE2-NEXT:    pmuludq %xmm3, %xmm2
-; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT:    orps %xmm0, %xmm1
-; X32-SSE2-NEXT:    movaps %xmm1, %xmm0
+; X32-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE2-NEXT:    por %xmm3, %xmm0
 ; X32-SSE2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: rotr_v4i32:
 ; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
 ; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; X64-AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm3
-; X64-AVX2-NEXT:    vpsrlvd %xmm3, %xmm0, %xmm3
-; X64-AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm4, %xmm1
 ; X64-AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; X64-AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; X64-AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
+; X64-AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; X64-AVX2-NEXT:    retq
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f

(Two additional file diffs suppressed because they are too large.)
@@ -16,14 +16,7 @@ declare <64 x i8> @llvm.fshl.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)
 define <8 x i64> @var_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind {
 ; AVX512-LABEL: var_funnnel_v8i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm2 = [63,63,63,63,63,63,63,63]
-; AVX512-NEXT:    vpandq %zmm2, %zmm1, %zmm3
-; AVX512-NEXT:    vpsllvq %zmm3, %zmm0, %zmm3
-; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vpsubq %zmm1, %zmm4, %zmm1
-; AVX512-NEXT:    vpandq %zmm2, %zmm1, %zmm1
-; AVX512-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> %amt)
   ret <8 x i64> %res

@@ -32,14 +25,7 @@ define <8 x i64> @var_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind {
 define <16 x i32> @var_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounwind {
 ; AVX512-LABEL: var_funnnel_v16i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} zmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; AVX512-NEXT:    vpandd %zmm2, %zmm1, %zmm3
-; AVX512-NEXT:    vpsllvd %zmm3, %zmm0, %zmm3
-; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vpsubd %zmm1, %zmm4, %zmm1
-; AVX512-NEXT:    vpandd %zmm2, %zmm1, %zmm1
-; AVX512-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpord %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %amt)
   ret <16 x i32> %res

@@ -313,14 +299,7 @@ define <8 x i64> @splatvar_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind
 ; AVX512-LABEL: splatvar_funnnel_v8i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpbroadcastq %xmm1, %zmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm3
-; AVX512-NEXT:    vpsllq %xmm3, %zmm0, %zmm3
-; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vprolvq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %splat = shufflevector <8 x i64> %amt, <8 x i64> undef, <8 x i32> zeroinitializer
   %res = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> %splat)

@@ -331,16 +310,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounw
 ; AVX512-LABEL: splatvar_funnnel_v16i32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpbroadcastd %xmm1, %zmm1
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm3
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
-; AVX512-NEXT:    vpslld %xmm3, %zmm0, %zmm3
-; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vpsubd %xmm1, %xmm4, %xmm1
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX512-NEXT:    vpsrld %xmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpord %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vprolvd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %splat = shufflevector <16 x i32> %amt, <16 x i32> undef, <16 x i32> zeroinitializer
   %res = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %splat)

(Two additional file diffs suppressed because they are too large.)
@@ -16,14 +16,7 @@ declare <64 x i8> @llvm.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)
 define <8 x i64> @var_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind {
 ; AVX512-LABEL: var_funnnel_v8i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm2 = [63,63,63,63,63,63,63,63]
-; AVX512-NEXT:    vpandq %zmm2, %zmm1, %zmm3
-; AVX512-NEXT:    vpsrlvq %zmm3, %zmm0, %zmm3
-; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vpsubq %zmm1, %zmm4, %zmm1
-; AVX512-NEXT:    vpandq %zmm2, %zmm1, %zmm1
-; AVX512-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> %amt)
   ret <8 x i64> %res

@@ -32,14 +25,7 @@ define <8 x i64> @var_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind {
 define <16 x i32> @var_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounwind {
 ; AVX512-LABEL: var_funnnel_v16i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} zmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; AVX512-NEXT:    vpandd %zmm2, %zmm1, %zmm3
-; AVX512-NEXT:    vpsrlvd %zmm3, %zmm0, %zmm3
-; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vpsubd %zmm1, %zmm4, %zmm1
-; AVX512-NEXT:    vpandd %zmm2, %zmm1, %zmm1
-; AVX512-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %amt)
   ret <16 x i32> %res

@@ -325,14 +311,7 @@ define <8 x i64> @splatvar_funnnel_v8i64(<8 x i64> %x, <8 x i64> %amt) nounwind
 ; AVX512-LABEL: splatvar_funnnel_v8i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpbroadcastq %xmm1, %zmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [63,63]
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm3
-; AVX512-NEXT:    vpsrlq %xmm3, %zmm0, %zmm3
-; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vpsubq %xmm1, %xmm4, %xmm1
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpsllq %xmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vporq %zmm3, %zmm0, %zmm0
+; AVX512-NEXT:    vprorvq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %splat = shufflevector <8 x i64> %amt, <8 x i64> undef, <8 x i32> zeroinitializer
   %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> %splat)

@@ -343,16 +322,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %amt) nounw
 ; AVX512-LABEL: splatvar_funnnel_v16i32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpbroadcastd %xmm1, %zmm1
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm3
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
-; AVX512-NEXT:    vpsrld %xmm3, %zmm0, %zmm3
-; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vpsubd %xmm1, %xmm4, %xmm1
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX512-NEXT:    vpslld %xmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpord %zmm3, %zmm0, %zmm0
+; AVX512-NEXT:    vprorvd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %splat = shufflevector <16 x i32> %amt, <16 x i32> undef, <16 x i32> zeroinitializer
   %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> %splat)

@@ -544,7 +514,7 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
 define <8 x i64> @constant_funnnel_v8i64(<8 x i64> %x) nounwind {
 ; AVX512-LABEL: constant_funnnel_v8i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vprolvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    vprorvq {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> <i64 4, i64 14, i64 50, i64 60, i64 4, i64 14, i64 50, i64 60>)
   ret <8 x i64> %res

@@ -553,7 +523,7 @@ define <8 x i64> @constant_funnnel_v8i64(<8 x i64> %x) nounwind {
 define <16 x i32> @constant_funnnel_v16i32(<16 x i32> %x) nounwind {
 ; AVX512-LABEL: constant_funnnel_v16i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vprolvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    vprorvd {{.*}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>)
   ret <16 x i32> %res

@@ -769,7 +739,7 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x) nounwind {
 define <8 x i64> @splatconstant_funnnel_v8i64(<8 x i64> %x) nounwind {
 ; AVX512-LABEL: splatconstant_funnnel_v8i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vprolq $50, %zmm0, %zmm0
+; AVX512-NEXT:    vprorq $14, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %res = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %x, <8 x i64> %x, <8 x i64> <i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14, i64 14>)
   ret <8 x i64> %res

@@ -778,7 +748,7 @@ define <8 x i64> @splatconstant_funnnel_v8i64(<8 x i64> %x) nounwind {
 define <16 x i32> @splatconstant_funnnel_v16i32(<16 x i32> %x) nounwind {
 ; AVX512-LABEL: splatconstant_funnnel_v16i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vprold $28, %zmm0, %zmm0
+; AVX512-NEXT:    vprord $4, %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %res = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %x, <16 x i32> %x, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>)
   ret <16 x i32> %res