[X86] Fold scalar_to_vector(x) -> extract_subvector(broadcast(x),0) iff broadcast(x) exists
Add handling for reusing an existing broadcast(x) to a wider vector.
parent 66799bf0e2
commit aa4afebbf9
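The combine replaces a scalar_to_vector of a value that already has a broadcast user with the low subvector of that broadcast, so the scalar never has to be materialized on its own. A minimal standalone sketch of the size dispatch the patched combine performs (plain C++, not LLVM API; pickAction and the sample bit widths are illustrative only):

    #include <cstdio>

    enum class Action { ReuseBroadcast, ExtractLowSubvector, KeepScalarToVector };

    // Mirrors the three cases in the patched combineScalarToVector once an
    // existing X86ISD::VBROADCAST of the same scalar operand has been found.
    static Action pickAction(unsigned SizeInBits, unsigned BroadcastSizeInBits) {
      if (BroadcastSizeInBits == SizeInBits)
        return Action::ReuseBroadcast;        // same width: reuse the broadcast node
      if (BroadcastSizeInBits > SizeInBits)
        return Action::ExtractLowSubvector;   // wider broadcast: take subvector 0
      return Action::KeepScalarToVector;      // narrower broadcast: still a TODO
    }

    int main() {
      // e.g. a 128-bit scalar_to_vector with an existing 256-bit broadcast.
      std::printf("%d\n", static_cast<int>(pickAction(128, 256))); // prints 1
    }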
@@ -49882,11 +49882,16 @@ static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
   // See if we're broadcasting the scalar value, in which case just reuse that.
   // Ensure the same SDValue from the SDNode use is being used.
-  // TODO: Handle different vector sizes when we have test coverage.
   for (SDNode *User : Src->uses())
-    if (User->getOpcode() == X86ISD::VBROADCAST && Src == User->getOperand(0) &&
-        User->getValueSizeInBits(0).getFixedSize() == VT.getFixedSizeInBits())
-      return SDValue(User, 0);
+    if (User->getOpcode() == X86ISD::VBROADCAST && Src == User->getOperand(0)) {
+      unsigned SizeInBits = VT.getFixedSizeInBits();
+      unsigned BroadcastSizeInBits = User->getValueSizeInBits(0).getFixedSize();
+      if (BroadcastSizeInBits == SizeInBits)
+        return SDValue(User, 0);
+      if (BroadcastSizeInBits > SizeInBits)
+        return extractSubVector(SDValue(User, 0), 0, DAG, DL, SizeInBits);
+      // TODO: Handle BroadcastSizeInBits < SizeInBits when we have test coverage.
+    }
 
   return SDValue();
 }
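Why taking subvector 0 of a wider broadcast is sound: scalar_to_vector(x) only defines lane 0 and leaves the remaining lanes undef, while every lane of broadcast(x) holds x, so the low subvector satisfies everything the original node guaranteed. A tiny standalone illustration with plain arrays standing in for the DAG nodes (not part of the patch):

    #include <array>
    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t x = 0x1234;

      // v4i64 broadcast(x): all four lanes hold x.
      std::array<uint64_t, 4> broadcast{x, x, x, x};

      // v2i64 extract_subvector(broadcast, 0): the low two lanes.
      std::array<uint64_t, 2> low{broadcast[0], broadcast[1]};

      // v2i64 scalar_to_vector(x) only guarantees lane 0 == x; lane 1 is undef,
      // so whatever the broadcast left there is acceptable.
      assert(low[0] == x);
      return 0;
    }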
@@ -347,19 +347,19 @@ define void @test_mscatter_v17f32(float* %base, <17 x i32> %index, <17 x float>
 ; WIDEN_AVX2-LABEL: test_mscatter_v17f32:
 ; WIDEN_AVX2: # %bb.0:
 ; WIDEN_AVX2-NEXT: vmovq %rdi, %xmm8
-; WIDEN_AVX2-NEXT: vpbroadcastq %xmm8, %ymm9
-; WIDEN_AVX2-NEXT: vmovd %esi, %xmm10
-; WIDEN_AVX2-NEXT: vpinsrd $1, %edx, %xmm10, %xmm10
-; WIDEN_AVX2-NEXT: vpinsrd $2, %ecx, %xmm10, %xmm10
-; WIDEN_AVX2-NEXT: vpinsrd $3, %r8d, %xmm10, %xmm10
-; WIDEN_AVX2-NEXT: vpmovsxdq %xmm10, %ymm10
-; WIDEN_AVX2-NEXT: vpsllq $2, %ymm10, %ymm10
-; WIDEN_AVX2-NEXT: vpaddq %ymm10, %ymm9, %ymm10
-; WIDEN_AVX2-NEXT: vmovq %xmm10, %rax
+; WIDEN_AVX2-NEXT: vpbroadcastq %xmm8, %ymm8
+; WIDEN_AVX2-NEXT: vmovd %esi, %xmm9
+; WIDEN_AVX2-NEXT: vpinsrd $1, %edx, %xmm9, %xmm9
+; WIDEN_AVX2-NEXT: vpinsrd $2, %ecx, %xmm9, %xmm9
+; WIDEN_AVX2-NEXT: vpinsrd $3, %r8d, %xmm9, %xmm9
+; WIDEN_AVX2-NEXT: vpmovsxdq %xmm9, %ymm9
+; WIDEN_AVX2-NEXT: vpsllq $2, %ymm9, %ymm9
+; WIDEN_AVX2-NEXT: vpaddq %ymm9, %ymm8, %ymm9
+; WIDEN_AVX2-NEXT: vmovq %xmm9, %rax
 ; WIDEN_AVX2-NEXT: vmovss %xmm0, (%rax)
-; WIDEN_AVX2-NEXT: vpextrq $1, %xmm10, %rax
+; WIDEN_AVX2-NEXT: vpextrq $1, %xmm9, %rax
 ; WIDEN_AVX2-NEXT: vmovss %xmm1, (%rax)
-; WIDEN_AVX2-NEXT: vextracti128 $1, %ymm10, %xmm0
+; WIDEN_AVX2-NEXT: vextracti128 $1, %ymm9, %xmm0
 ; WIDEN_AVX2-NEXT: vmovq %xmm0, %rax
 ; WIDEN_AVX2-NEXT: vmovss %xmm2, (%rax)
 ; WIDEN_AVX2-NEXT: vpextrq $1, %xmm0, %rax
@@ -370,7 +370,7 @@ define void @test_mscatter_v17f32(float* %base, <17 x i32> %index, <17 x float>
 ; WIDEN_AVX2-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0
 ; WIDEN_AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
 ; WIDEN_AVX2-NEXT: vpsllq $2, %ymm0, %ymm0
-; WIDEN_AVX2-NEXT: vpaddq %ymm0, %ymm9, %ymm0
+; WIDEN_AVX2-NEXT: vpaddq %ymm0, %ymm8, %ymm0
 ; WIDEN_AVX2-NEXT: vmovq %xmm0, %rax
 ; WIDEN_AVX2-NEXT: vmovss %xmm4, (%rax)
 ; WIDEN_AVX2-NEXT: vpextrq $1, %xmm0, %rax
@@ -379,13 +379,7 @@ define void @test_mscatter_v17f32(float* %base, <17 x i32> %index, <17 x float>
 ; WIDEN_AVX2-NEXT: vmovq %xmm0, %rax
 ; WIDEN_AVX2-NEXT: vmovss %xmm6, (%rax)
 ; WIDEN_AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; WIDEN_AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; WIDEN_AVX2-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
-; WIDEN_AVX2-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
-; WIDEN_AVX2-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0
-; WIDEN_AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; WIDEN_AVX2-NEXT: vpsllq $2, %ymm0, %ymm0
-; WIDEN_AVX2-NEXT: vpaddq %ymm0, %ymm9, %ymm0
+; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; WIDEN_AVX2-NEXT: vmovss %xmm7, (%rax)
 ; WIDEN_AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; WIDEN_AVX2-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
@@ -393,31 +387,37 @@ define void @test_mscatter_v17f32(float* %base, <17 x i32> %index, <17 x float>
; WIDEN_AVX2-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; WIDEN_AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
; WIDEN_AVX2-NEXT: vpsllq $2, %ymm1, %ymm1
; WIDEN_AVX2-NEXT: vpaddq %ymm1, %ymm9, %ymm1
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vpaddq %ymm1, %ymm8, %ymm1
; WIDEN_AVX2-NEXT: vmovq %xmm1, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm2, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vmovss %xmm0, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vpextrq $1, %xmm1, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm2, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vmovss %xmm0, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; WIDEN_AVX2-NEXT: vmovq %xmm1, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm2, (%rax)
; WIDEN_AVX2-NEXT: vmovss %xmm0, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vpextrq $1, %xmm1, %rax
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vmovss %xmm1, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vmovq %xmm0, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm1, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vpextrq $1, %xmm0, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm1, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; WIDEN_AVX2-NEXT: vmovq %xmm0, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm1, (%rax)
; WIDEN_AVX2-NEXT: vpextrq $1, %xmm0, %rax
; WIDEN_AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; WIDEN_AVX2-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; WIDEN_AVX2-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
; WIDEN_AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
; WIDEN_AVX2-NEXT: vpsllq $2, %ymm1, %ymm1
; WIDEN_AVX2-NEXT: vpaddq %ymm1, %ymm8, %ymm1
; WIDEN_AVX2-NEXT: vmovq %xmm1, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm0, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vpextrq $1, %xmm1, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm0, (%rax)
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; WIDEN_AVX2-NEXT: vmovq %xmm1, %rax
; WIDEN_AVX2-NEXT: vmovss %xmm0, (%rax)
; WIDEN_AVX2-NEXT: vpextrq $1, %xmm1, %rax
; WIDEN_AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; WIDEN_AVX2-NEXT: vmovss %xmm0, (%rax)
; WIDEN_AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero