[X86][SSE] getFauxShuffleMask - add support for INSERT_VECTOR_ELT(EXTRACT_VECTOR_ELT) shuffle pattern
We already do this for PINSRB/PINSRW and SCALAR_TO_VECTOR.
commit c8ede5e485
parent e6a7e3b5e3
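As a rough illustration, the pattern this targets is an insertelement of an extractelement at constant indices, taken from the test4 case updated below:

  %eee = extractelement <8 x i64> %x, i32 4
  %rrr2 = insertelement <8 x i64> %x, i64 %eee, i32 1

getFauxShuffleMask can now describe the insertion as a two-input shuffle mask (the extract's source vector plus the vector being inserted into), so combineX86ShufflesRecursively can lower it as a vector shuffle instead of bouncing the element through a scalar register (e.g. vmovlhps + vinsertf32x4 rather than vmovq + vpinsrq in test4).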

@@ -2018,6 +2018,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   // We have target-specific dag combine patterns for the following nodes:
   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
   setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
+  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
   setTargetDAGCombine(ISD::CONCAT_VECTORS);
   setTargetDAGCombine(ISD::INSERT_SUBVECTOR);

@@ -7369,37 +7370,63 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
     }
     return true;
   }
-  case ISD::SCALAR_TO_VECTOR: {
-    // Match against a scalar_to_vector of an extract from a vector,
-    // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
-    SDValue N0 = N.getOperand(0);
+  case ISD::SCALAR_TO_VECTOR:
+  case ISD::INSERT_VECTOR_ELT: {
+    // Match against a insert_vector_elt/scalar_to_vector of an extract from a
+    // vector, for matching src/dst vector types.
+    // TODO: Merge with PINSRB/PINSRW cases below.
+    // TODO: Handle truncate/zext/shift of scalars.
+    SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
     SDValue SrcExtract;
 
-    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
-         N0.getOperand(0).getValueType() == VT) ||
-        (N0.getOpcode() == X86ISD::PEXTRW &&
-         N0.getOperand(0).getValueType() == MVT::v8i16) ||
-        (N0.getOpcode() == X86ISD::PEXTRB &&
-         N0.getOperand(0).getValueType() == MVT::v16i8)) {
-      SrcExtract = N0;
+    if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+         Scl.getOperand(0).getValueType() == VT) ||
+        (Scl.getOpcode() == X86ISD::PEXTRW &&
+         Scl.getOperand(0).getValueType() == MVT::v8i16) ||
+        (Scl.getOpcode() == X86ISD::PEXTRB &&
+         Scl.getOperand(0).getValueType() == MVT::v16i8)) {
+      SrcExtract = Scl;
     }
 
     if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
       return false;
+    if (Opcode != ISD::SCALAR_TO_VECTOR &&
+        !isa<ConstantSDNode>(N.getOperand(2)))
+      return false;
 
     SDValue SrcVec = SrcExtract.getOperand(0);
     EVT SrcVT = SrcVec.getValueType();
     unsigned NumSrcElts = SrcVT.getVectorNumElements();
-    unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
+    unsigned NumZeros =
+        std::max<int>((NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1, 0);
+
+    if (SrcVT.getSizeInBits() != VT.getSizeInBits() ||
+        (NumSrcElts % NumElts) != 0)
+      return false;
 
     unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
     if (NumSrcElts <= SrcIdx)
       return false;
 
-    Ops.push_back(SrcVec);
-    Mask.push_back(SrcIdx);
-    Mask.append(NumZeros, SM_SentinelZero);
-    Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
+    unsigned DstIdx =
+        Opcode == ISD::SCALAR_TO_VECTOR ? 0 : N.getConstantOperandVal(2);
+    if (NumElts <= DstIdx)
+      return false;
+
+    if (Opcode == ISD::SCALAR_TO_VECTOR) {
+      Ops.push_back(SrcVec);
+      Mask.append(NumSrcElts, SM_SentinelUndef);
+    } else {
+      Ops.push_back(SrcVec);
+      Ops.push_back(N.getOperand(0));
+      for (int i = 0; i != (int)NumSrcElts; ++i)
+        Mask.push_back(NumSrcElts + i);
+    }
+
+    int Scale = NumSrcElts / NumElts;
+    Mask[Scale * DstIdx] = SrcIdx;
+    for (int i = 0; i != (int)NumZeros; ++i)
+      Mask[(Scale * DstIdx) + i + 1] = SM_SentinelZero;
     return true;
   }
   case X86ISD::PINSRB:

@@ -40562,19 +40589,24 @@ static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
   EVT VT = N->getValueType(0);
   assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
-          (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
+          (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16) ||
+          N->getOpcode() == ISD::INSERT_VECTOR_ELT) &&
          "Unexpected vector insertion");
 
+  if (N->getOpcode() == X86ISD::PINSRB || N->getOpcode() == X86ISD::PINSRW) {
     unsigned NumBitsPerElt = VT.getScalarSizeInBits();
     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
     if (TLI.SimplifyDemandedBits(SDValue(N, 0),
                                  APInt::getAllOnesValue(NumBitsPerElt), DCI))
       return SDValue(N, 0);
+  }
 
-  // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
+  // Attempt to combine insertion patterns to a shuffle.
+  if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
     SDValue Op(N, 0);
     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
       return Res;
+  }
 
   return SDValue();
 }

@@ -47189,6 +47221,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case X86ISD::VSRAI:
   case X86ISD::VSRLI:
     return combineVectorShiftImm(N, DAG, DCI, Subtarget);
+  case ISD::INSERT_VECTOR_ELT:
   case X86ISD::PINSRB:
   case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
   case X86ISD::SHUFP: // Handle all target specific shuffles

@@ -48,10 +48,9 @@ define <16 x float> @test3(<16 x float> %x) nounwind {
 define <8 x i64> @test4(<8 x i64> %x) nounwind {
 ; CHECK-LABEL: test4:
 ; CHECK: ## %bb.0:
-; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm1
-; CHECK-NEXT: vmovq %xmm1, %rax
-; CHECK-NEXT: vpinsrq $1, %rax, %xmm0, %xmm1
-; CHECK-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; CHECK-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
+; CHECK-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
 ; CHECK-NEXT: retq
   %eee = extractelement <8 x i64> %x, i32 4
   %rrr2 = insertelement <8 x i64> %x, i64 %eee, i32 1

@@ -1730,15 +1730,9 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp,
 define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp) {
 ; CHECK-LABEL: test_16xi32_to_4xi32_perm_mem_mask3:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastd 24(%rdi), %xmm0
-; CHECK-NEXT: vmovdqa (%rdi), %xmm1
-; CHECK-NEXT: vmovaps 16(%rdi), %xmm2
-; CHECK-NEXT: vmovd %xmm1, %eax
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; CHECK-NEXT: vextractps $3, %xmm2, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: vpextrd $2, %xmm1, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; CHECK-NEXT: vmovdqa 16(%rdi), %xmm1
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm0 = [2,4,3,6]
+; CHECK-NEXT: vpermi2d (%rdi), %xmm1, %xmm0
 ; CHECK-NEXT: retq
   %vec = load <16 x i32>, <16 x i32>* %vp
   %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 6, i32 0, i32 7, i32 2>

@@ -1747,17 +1741,11 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp) {
 define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
 ; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask3:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastd 24(%rdi), %xmm2
-; CHECK-NEXT: vmovdqa (%rdi), %xmm3
-; CHECK-NEXT: vmovaps 16(%rdi), %xmm4
-; CHECK-NEXT: vmovd %xmm3, %eax
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vextractps $3, %xmm4, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrd $2, %xmm3, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vmovdqa 16(%rdi), %xmm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm3 = [2,4,3,6]
+; CHECK-NEXT: vpermi2d (%rdi), %xmm2, %xmm3
 ; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm3, %xmm0 {%k1}
 ; CHECK-NEXT: retq
   %vec = load <16 x i32>, <16 x i32>* %vp
   %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 6, i32 0, i32 7, i32 2>

@@ -1769,17 +1757,11 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4
 define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4 x i32> %mask) {
 ; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask3:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastd 24(%rdi), %xmm1
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
-; CHECK-NEXT: vmovaps 16(%rdi), %xmm3
-; CHECK-NEXT: vmovd %xmm2, %eax
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vextractps $3, %xmm3, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrd $2, %xmm2, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; CHECK-NEXT: vmovdqa 16(%rdi), %xmm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [2,4,3,6]
 ; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermi2d (%rdi), %xmm2, %xmm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
 ; CHECK-NEXT: retq
   %vec = load <16 x i32>, <16 x i32>* %vp
   %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 6, i32 0, i32 7, i32 2>

@@ -40,9 +40,8 @@ define <8 x i64> @insert_subvector_512(i32 %x0, i32 %x1, <8 x i64> %v) nounwind
 ; X64_AVX256: # %bb.0:
 ; X64_AVX256-NEXT: vmovd %edi, %xmm2
 ; X64_AVX256-NEXT: vpinsrd $1, %esi, %xmm2, %xmm2
-; X64_AVX256-NEXT: vmovq %xmm2, %rax
-; X64_AVX256-NEXT: vextracti128 $1, %ymm0, %xmm2
-; X64_AVX256-NEXT: vpinsrq $0, %rax, %xmm2, %xmm2
+; X64_AVX256-NEXT: vextracti128 $1, %ymm0, %xmm3
+; X64_AVX256-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3]
 ; X64_AVX256-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; X64_AVX256-NEXT: retq
 ;

@@ -1891,19 +1891,8 @@ define <4 x i32> @larger_mul(<16 x i16> %A, <16 x i16> %B) {
 ; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
 ; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
 ; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpextrd $2, %xmm0, %eax
-; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vmovd %xmm2, %eax
-; AVX512-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrd $2, %xmm2, %eax
-; AVX512-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX512-NEXT: vpextrd $1, %xmm2, %eax
-; AVX512-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX512-NEXT: vpextrd $3, %xmm2, %eax
-; AVX512-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX512-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vphaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
   %a = sext <16 x i16> %A to <16 x i32>

@@ -687,15 +687,12 @@ define <2 x i64> @strict_vector_fptosi_v2f32_to_v2i64(<2 x float> %a) #0 {
 ;
 ; AVX512DQ-32-LABEL: strict_vector_fptosi_v2f32_to_v2i64:
 ; AVX512DQ-32: # %bb.0:
-; AVX512DQ-32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX512DQ-32-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[1],zero,zero,zero
 ; AVX512DQ-32-NEXT: vcvttps2qq %ymm1, %zmm1
-; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,zero,zero
+; AVX512DQ-32-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512DQ-32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
 ; AVX512DQ-32-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-32-NEXT: vmovd %xmm0, %eax
-; AVX512DQ-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX512DQ-32-NEXT: vpextrd $1, %xmm0, %eax
-; AVX512DQ-32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512DQ-32-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512DQ-32-NEXT: vzeroupper
 ; AVX512DQ-32-NEXT: retl
 ;

@@ -1021,15 +1018,12 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
 ;
 ; AVX512DQ-32-LABEL: strict_vector_fptoui_v2f32_to_v2i64:
 ; AVX512DQ-32: # %bb.0:
-; AVX512DQ-32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX512DQ-32-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[1],zero,zero,zero
 ; AVX512DQ-32-NEXT: vcvttps2uqq %ymm1, %zmm1
-; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,zero,zero
+; AVX512DQ-32-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512DQ-32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
 ; AVX512DQ-32-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-32-NEXT: vmovd %xmm0, %eax
-; AVX512DQ-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX512DQ-32-NEXT: vpextrd $1, %xmm0, %eax
-; AVX512DQ-32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512DQ-32-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512DQ-32-NEXT: vzeroupper
 ; AVX512DQ-32-NEXT: retl
 ;

@@ -641,11 +641,11 @@ define <4 x double> @sitofp_v4i64_v4f64(<4 x i64> %x) #0 {
 ; AVX-32-NEXT: andl $-8, %esp
 ; AVX-32-NEXT: subl $64, %esp
 ; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)

@@ -876,11 +876,11 @@ define <4 x float> @sitofp_v4i64_v4f32(<4 x i64> %x) #0 {
 ; AVX-32-NEXT: andl $-8, %esp
 ; AVX-32-NEXT: subl $48, %esp
 ; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)

@@ -999,11 +999,11 @@ define <4 x float> @uitofp_v4i64_v4f32(<4 x i64> %x) #0 {
 ; AVX-32-NEXT: andl $-8, %esp
 ; AVX-32-NEXT: subl $48, %esp
 ; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[2,3,2,3]
 ; AVX-32-NEXT: vmovlps %xmm2, {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: vextractps $1, %xmm0, %eax
 ; AVX-32-NEXT: shrl $31, %eax

@@ -273,18 +273,18 @@ define <8 x double> @sitofp_v8i64_v8f64(<8 x i64> %x) #0 {
 ; NODQ-32-NEXT: subl $128, %esp
 ; NODQ-32-NEXT: vextractf32x4 $2, %zmm0, %xmm1
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractf32x4 $3, %zmm0, %xmm1
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)

@@ -401,19 +401,19 @@ define <8 x float> @sitofp_v8i64_v8f32(<8 x i64> %x) #0 {
 ; NODQ-32-NEXT: andl $-8, %esp
 ; NODQ-32-NEXT: subl $96, %esp
 ; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractf32x4 $2, %zmm0, %xmm1
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractf32x4 $3, %zmm0, %xmm0
 ; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)

@@ -497,19 +497,19 @@ define <8 x float> @uitofp_v8i64_v8f32(<8 x i64> %x) #0 {
 ; NODQ-32-NEXT: andl $-8, %esp
 ; NODQ-32-NEXT: subl $96, %esp
 ; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractf128 $1, %ymm0, %xmm3
 ; NODQ-32-NEXT: vmovlps %xmm3, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm3[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm3[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractf32x4 $2, %zmm0, %xmm2
 ; NODQ-32-NEXT: vmovlps %xmm2, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractf32x4 $3, %zmm0, %xmm1
 ; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm4 = xmm1[2,3,2,3]
 ; NODQ-32-NEXT: vmovlps %xmm4, {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: vextractps $1, %xmm0, %eax
 ; NODQ-32-NEXT: shrl $31, %eax

@@ -2105,14 +2105,12 @@ define <4 x i32> @extract3_insert3_v4i32_0127(<4 x i32> %a0, <4 x i32> %a1) {
 ;
 ; SSE41-LABEL: extract3_insert3_v4i32_0127:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: extractps $3, %xmm1, %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: extract3_insert3_v4i32_0127:
 ; AVX: # %bb.0:
-; AVX-NEXT: vextractps $3, %xmm1, %eax
-; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
 ; AVX-NEXT: retq
   %1 = extractelement <4 x i32> %a1, i32 3
   %2 = insertelement <4 x i32> %a0, i32 %1, i32 3