forked from OSchip/llvm-project
[X86][Codegen] PR51615: don't replace wide volatile load with narrow broadcast-from-memory
Even though https://bugs.llvm.org/show_bug.cgi?id=51615 appears to have been introduced by D105390, the fix lies here. We cannot replace a wide volatile load with a broadcast-from-memory, because that would narrow the load, which isn't legal for volatiles. Reviewed By: spatel Differential Revision: https://reviews.llvm.org/D108757
This commit is contained in:
parent
0bcfd4cbac
commit
a8125bf4a8
|
@ -5036,6 +5036,17 @@ static bool MayFoldLoad(SDValue Op) {
|
|||
return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
|
||||
}
|
||||
|
||||
/// Returns true if \p Op is a load that may legally be folded into a
/// broadcast-from-memory of elements of type \p EltVT.
///
/// Requires the load to be foldable at all (see MayFoldLoad: single use,
/// normal load), and additionally rejects wide volatile loads: we can not
/// replace a wide volatile load with a broadcast-from-memory, because that
/// would narrow the load, which isn't legal for volatiles (PR51615).
static bool MayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT) {
  if (!MayFoldLoad(Op))
    return false;

  // We can not replace a wide volatile load with a broadcast-from-memory,
  // because that would narrow the load, which isn't legal for volatiles.
  // MayFoldLoad() guarantees this is a normal load, so use cast<> instead of
  // dyn_cast<> — the original dyn_cast<> result was dereferenced below with
  // no null check, which would be UB if the cast ever failed.
  const LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
  return !Ld->isVolatile() ||
         Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
}
|
||||
|
||||
/// Returns true if \p Op may be folded into its consuming store: it must have
/// exactly one use, and that sole user must be a normal store node.
static bool MayFoldIntoStore(SDValue Op) {
  // The single-use check also guards the use_begin() dereference below.
  if (!Op.hasOneUse())
    return false;
  return ISD::isNormalStore(*Op.getNode()->use_begin());
}
|
||||
|
@ -50876,7 +50887,8 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
|
|||
|
||||
// concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
|
||||
if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
|
||||
(Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
|
||||
(Subtarget.hasAVX2() || MayFoldLoadIntoBroadcastFromMem(
|
||||
Op0.getOperand(0), VT.getScalarType())))
|
||||
return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
|
||||
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
|
||||
Op0.getOperand(0),
|
||||
|
|
|
@ -0,0 +1,81 @@
|
|||
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
||||
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX
|
||||
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX2
|
||||
|
||||
; https://bugs.llvm.org/show_bug.cgi?id=51615
|
||||
; We can not replace a wide volatile load with a broadcast-from-memory,
|
||||
; because that would narrow the load, which isn't legal for volatiles.
|
||||
|
||||
; Wide case: the volatile load is <2 x double> (128 bits) but the broadcast
; element is f64 (64 bits), so folding the load into a broadcast-from-memory
; would narrow it — illegal for volatiles. Both AVX and AVX2 must keep the
; full-width vmovaps load of g0 and splat in registers instead.
@g0 = external dso_local global <2 x double>, align 16
define void @volatile_load_2_elts() {
; AVX-LABEL: volatile_load_2_elts:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps g0(%rip), %xmm0
; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX-NEXT:    vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2]
; AVX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3]
; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3]
; AVX-NEXT:    vmovapd %ymm0, (%rax)
; AVX-NEXT:    vmovapd %ymm1, (%rax)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX2-LABEL: volatile_load_2_elts:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovaps g0(%rip), %xmm0
; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovaps %ymm0, (%rax)
; AVX2-NEXT:    vmovaps %ymm2, (%rax)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
  ; Volatile 128-bit load, then splat element 0 into a <4 x double>.
  %i = load volatile <2 x double>, <2 x double>* @g0, align 16
  %i1 = shufflevector <2 x double> %i, <2 x double> poison, <4 x i32> <i32 undef, i32 0, i32 undef, i32 0>
  %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32> <i32 6, i32 7, i32 3, i32 6, i32 7, i32 1, i32 7, i32 1>
  ; Volatile store to an undef pointer keeps the shuffle live for codegen.
  store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
  ret void
}
|
||||
|
||||
; Narrow case: the volatile load is <1 x double> (64 bits), the same width as
; the f64 broadcast element, so broadcast-from-memory does not narrow the load
; and remains legal — hence the folded vbroadcastsd from g1 in the output.
@g1 = external dso_local global <1 x double>, align 16
define void @volatile_load_1_elt() {
; ALL-LABEL: volatile_load_1_elt:
; ALL:       # %bb.0:
; ALL-NEXT:    vbroadcastsd g1(%rip), %ymm0
; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; ALL-NEXT:    vmovaps %ymm0, (%rax)
; ALL-NEXT:    vmovaps %ymm2, (%rax)
; ALL-NEXT:    vzeroupper
; ALL-NEXT:    retq
  ; Volatile 64-bit load, splat its only element into a <4 x double>.
  %i = load volatile <1 x double>, <1 x double>* @g1, align 16
  %i1 = shufflevector <1 x double> %i, <1 x double> poison, <4 x i32> <i32 undef, i32 0, i32 undef, i32 0>
  %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32> <i32 6, i32 7, i32 3, i32 6, i32 7, i32 1, i32 7, i32 1>
  ; Volatile store to an undef pointer keeps the shuffle live for codegen.
  store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
  ret void
}
|
||||
|
||||
; Bitcast case: a volatile <2 x float> (64-bit) load bitcast to <1 x double>.
; The loaded width matches the f64 broadcast element width, so the
; broadcast-from-memory fold (vbroadcastsd from g2) does not narrow the
; volatile load and is still allowed.
@g2 = external dso_local global <2 x float>, align 16
define void @volatile_load_2_elts_bitcast() {
; ALL-LABEL: volatile_load_2_elts_bitcast:
; ALL:       # %bb.0:
; ALL-NEXT:    vbroadcastsd g2(%rip), %ymm0
; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; ALL-NEXT:    vmovaps %ymm0, (%rax)
; ALL-NEXT:    vmovaps %ymm2, (%rax)
; ALL-NEXT:    vzeroupper
; ALL-NEXT:    retq
  ; Volatile 64-bit load, reinterpreted as one double, then splatted.
  %i0 = load volatile <2 x float>, <2 x float>* @g2, align 16
  %i = bitcast <2 x float> %i0 to <1 x double>
  %i1 = shufflevector <1 x double> %i, <1 x double> poison, <4 x i32> <i32 undef, i32 0, i32 undef, i32 0>
  %shuffle1 = shufflevector <4 x double> %i1, <4 x double> zeroinitializer, <8 x i32> <i32 6, i32 7, i32 3, i32 6, i32 7, i32 1, i32 7, i32 1>
  ; Volatile store to an undef pointer keeps the shuffle live for codegen.
  store volatile <8 x double> %shuffle1, <8 x double>* undef, align 64
  ret void
}
|
Loading…
Reference in New Issue