[X86][AVX] Peek through bitcasts to find the source of broadcasts (reapplied)

commit 5f71c909f0 (parent 7cc4cfe4fc)

AVX1 can only broadcast vectors as floats/doubles, so for 256-bit vectors we insert bitcasts if we are shuffling v8i32/v4i64 types. Unfortunately, the presence of these bitcasts prevents the current broadcast lowering code from peeking through cases where we have concatenated / extracted vectors to create the 256-bit vectors.

This patch allows us to peek through bitcasts as long as the number of elements doesn't change (i.e. the element bitwidth is the same), so the broadcast index is not affected.

Note that this bitcast peek is different from the stage later on, which doesn't care about the type and is just trying to find a load node.

As we're being more aggressive with bitcasts, we also need to ensure that the broadcast type is correctly bitcasted.

Differential Revision: http://reviews.llvm.org/D21660

llvm-svn: 274013
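As a minimal standalone sketch of the rule described above (peekBroadcastIdx and its driver are illustrative assumptions, not patch or LLVM code): a bitcast is only peeked through when it preserves the scalar bit width, because only then does the broadcast element index mean the same thing on both sides of the cast.

    #include <cstdio>
    #include <optional>

    // Returns the broadcast index as seen through a bitcast, or nullopt if
    // the cast changes the element width and the index would need rescaling.
    std::optional<unsigned> peekBroadcastIdx(unsigned BroadcastIdx,
                                             unsigned DstScalarBits,
                                             unsigned SrcScalarBits) {
      if (DstScalarBits != SrcScalarBits)
        return std::nullopt; // e.g. v8i32 <-> v4i64: element numbering differs
      return BroadcastIdx;   // e.g. v8i32 <-> v8f32: index carries over as-is
    }

    int main() {
      if (auto Idx = peekBroadcastIdx(3, 32, 32))
        std::printf("peek through: still element %u\n", *Idx);
      if (!peekBroadcastIdx(3, 32, 64))
        std::printf("stop: element widths differ\n");
    }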
@@ -8514,13 +8514,20 @@ static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
   SDValue V = V1;
   for (;;) {
     switch (V.getOpcode()) {
+    case ISD::BITCAST: {
+      SDValue VSrc = V.getOperand(0);
+      MVT SrcVT = VSrc.getSimpleValueType();
+      if (VT.getScalarSizeInBits() != SrcVT.getScalarSizeInBits())
+        break;
+      V = VSrc;
+      continue;
+    }
     case ISD::CONCAT_VECTORS: {
       int OperandSize = Mask.size() / V.getNumOperands();
       V = V.getOperand(BroadcastIdx / OperandSize);
       BroadcastIdx %= OperandSize;
       continue;
     }
-
     case ISD::INSERT_SUBVECTOR: {
       SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
       auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
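A worked example of the index walk in the switch above, as a self-contained driver (the variable names mirror the patch; the program itself is an illustrative assumption): broadcasting element 7 of a v8i32 built by concat_vectors(v4i32 A, v4i32 B) descends into operand B at element 3, and a same-width bitcast met along the way now leaves that index unchanged.

    #include <cstdio>

    int main() {
      unsigned BroadcastIdx = 7; // splat index into the concatenated v8i32
      unsigned MaskSize = 8;     // elements in the shuffle mask
      unsigned NumOperands = 2;  // CONCAT_VECTORS(A, B)
      unsigned OperandSize = MaskSize / NumOperands; // 4 elements per operand
      unsigned Operand = BroadcastIdx / OperandSize; // 1, i.e. operand B
      BroadcastIdx %= OperandSize;                   // 3
      std::printf("descend into operand %u, element %u\n", Operand,
                  BroadcastIdx);
    }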
@@ -8567,8 +8574,10 @@ static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
       return SDValue();
   } else if (MayFoldLoad(BC) && !cast<LoadSDNode>(BC)->isVolatile()) {
     // 32-bit targets need to load i64 as a f64 and then bitcast the result.
-    if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64)
+    if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
       BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
+      Opcode = (BroadcastVT.is128BitVector() ? X86ISD::MOVDDUP : Opcode);
+    }
 
     // If we are broadcasting a load that is only used by the shuffle
     // then we can reduce the vector load to the broadcasted scalar load.
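A rough standalone model of the selection the newly braced block makes (pickBroadcast and its result strings are illustrative assumptions, not the LLVM API): on a 32-bit target an i64 scalar cannot be loaded directly, so it is loaded as f64 and broadcast with a float type, using MOVDDUP for the 128-bit case.

    #include <cstdio>

    // Editorial model of the opcode/type choice; not LLVM code.
    const char *pickBroadcast(bool Is64Bit, bool ScalarIsI64, unsigned VecBits) {
      if (!Is64Bit && ScalarIsI64)
        return VecBits == 128 ? "MOVDDUP as v2f64" : "VBROADCAST as v4f64";
      return "VBROADCAST with the native type";
    }

    int main() {
      std::printf("%s\n", pickBroadcast(false, true, 128)); // MOVDDUP as v2f64
      std::printf("%s\n", pickBroadcast(false, true, 256)); // VBROADCAST as v4f64
    }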
@@ -8605,7 +8614,22 @@ static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
   }
 
   if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
-    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V);
+    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
+                    DAG.getBitcast(MVT::f64, V));
+
+  // Bitcast back to the same scalar type as BroadcastVT.
+  MVT SrcVT = V.getSimpleValueType();
+  if (SrcVT.getScalarType() != BroadcastVT.getScalarType()) {
+    assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
+           "Unexpected vector element size");
+    if (SrcVT.isVector()) {
+      unsigned NumSrcElts = SrcVT.getVectorNumElements();
+      SrcVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
+    } else {
+      SrcVT = BroadcastVT.getScalarType();
+    }
+    V = DAG.getBitcast(SrcVT, V);
+  }
 
   return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
 }
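Because the peek is now more aggressive, the value feeding the broadcast may still carry an integer type while BroadcastVT is a float type; the new block above repairs that. A small standalone sketch of the same decision (repairSourceType and the string encoding of types are illustrative assumptions; the real code builds MVTs): a vector source keeps its element count but takes BroadcastVT's scalar type, while a scalar source is bitcast to that scalar type directly.

    #include <cstdio>
    #include <string>

    // Editorial model of the "bitcast back" step; types modeled as strings.
    std::string repairSourceType(bool SrcIsVector, unsigned NumSrcElts,
                                 const std::string &BroadcastScalar) {
      if (SrcIsVector)
        return "v" + std::to_string(NumSrcElts) + BroadcastScalar; // e.g. v2f64
      return BroadcastScalar;                                      // e.g. f64
    }

    int main() {
      // A v2i64 source feeding a v4f64 broadcast is re-typed as v2f64.
      std::printf("%s\n", repairSourceType(true, 2, "f64").c_str());
    }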
@@ -173,14 +173,12 @@ define <8 x i32> @load_splat_8i32_4i32_33333333(<4 x i32>* %ptr) nounwind uwtabl
 ; X32-LABEL: load_splat_8i32_4i32_33333333:
 ; X32:       ## BB#0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpshufd {{.*#+}} xmm0 = mem[3,3,3,3]
-; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT:    vbroadcastss 12(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_8i32_4i32_33333333:
 ; X64:       ## BB#0: ## %entry
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = mem[3,3,3,3]
-; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT:    vbroadcastss 12(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
   %ld = load <4 x i32>, <4 x i32>* %ptr
@@ -277,14 +275,12 @@ define <4 x i64> @load_splat_4i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable re
 ; X32-LABEL: load_splat_4i64_2i64_1111:
 ; X32:       ## BB#0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT:    vbroadcastsd 8(%eax), %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_4i64_2i64_1111:
 ; X64:       ## BB#0: ## %entry
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT:    vbroadcastsd 8(%rdi), %ymm0
 ; X64-NEXT:    retq
 entry:
   %ld = load <2 x i64>, <2 x i64>* %ptr
@@ -442,8 +442,7 @@ define <2 x i64> @load_splat_2i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable re
 ; X32-LABEL: load_splat_2i64_2i64_1111:
 ; X32:       ## BB#0: ## %entry
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: load_splat_2i64_2i64_1111:
@@ -1952,16 +1952,30 @@ define <4 x i32> @mask_v4i32_0127(<4 x i32> %a, <4 x i32> %b) {
 }
 
 define <4 x float> @broadcast_v4f32_0101_from_v2f32(<2 x float>* %x) {
-; SSE-LABEL: broadcast_v4f32_0101_from_v2f32:
-; SSE:       # BB#0:
-; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; SSE-NEXT:    retq
+; SSE2-LABEL: broadcast_v4f32_0101_from_v2f32:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; SSE2-NEXT:    retq
+;
+; SSE3-LABEL: broadcast_v4f32_0101_from_v2f32:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: broadcast_v4f32_0101_from_v2f32:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: broadcast_v4f32_0101_from_v2f32:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
+; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: broadcast_v4f32_0101_from_v2f32:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; AVX-NEXT:    retq
   %1 = load <2 x float>, <2 x float>* %x, align 1
   %2 = shufflevector <2 x float> %1, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1320,8 +1320,7 @@ define <4 x double> @splat_v4f64(<2 x double> %r) {
 define <4 x i64> @splat_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
 ; AVX1-LABEL: splat_mem_v4i64_from_v2i64:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vbroadcastsd (%rdi), %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: splat_mem_v4i64_from_v2i64: