[AMDGPU] Custom lower INSERT_SUBVECTOR v3, v4, v5, v8
Summary:
Since the changes to introduce vec3 and vec5, INSERT_SUBVECTOR for these
sizes has been marked "expand", which made LegalizeDAG lower it to loads and
stores via a stack slot. The resulting code was optimized away later, but the
now-unused stack slot itself was never deleted. This commit avoids that
problem by custom lowering INSERT_SUBVECTOR into an EXTRACT_VECTOR_ELT and
INSERT_VECTOR_ELT for each element of the subvector being inserted.

V2: Addressed review comments re test.

Differential Revision: https://reviews.llvm.org/D63160

Change-Id: I9e3c13e36f68cfa3431bb9814851cc1f673274e1
llvm-svn: 365148
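Schematically, the new lowering replaces one INSERT_SUBVECTOR node with an
extract/insert pair per subvector element. A hedged sketch (illustrative DAG
notation, not taken from the patch) for inserting a v2i32 subvector at index 1
of a v4i32:

    t0 = insert_subvector Vec:v4i32, Ins:v2i32, 1
      becomes
    e0 = extract_vector_elt Ins, 0
    t1 = insert_vector_elt Vec, e0, 1
    e1 = extract_vector_elt Ins, 1
    t0 = insert_vector_elt t1, e1, 2

These per-element nodes are plain register operations, so no frame object is
ever created.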
parent 75b5edf6a1
commit 5816889c74
llvm/lib/Target/AMDGPU/SIISelLowering.cpp:

@@ -335,16 +335,16 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
 
   // Deal with vec3 vector operations when widened to vec4.
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Expand);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);
 
   // Deal with vec5 vector operations when widened to vec8.
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Expand);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);
 
   // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
   // and output demarshalling
@@ -3956,6 +3956,8 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
+  case ISD::INSERT_SUBVECTOR:
+    return lowerINSERT_SUBVECTOR(Op, DAG);
   case ISD::INSERT_VECTOR_ELT:
     return lowerINSERT_VECTOR_ELT(Op, DAG);
   case ISD::EXTRACT_VECTOR_ELT:
@@ -4628,6 +4630,32 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
   return DAG.getUNDEF(ASC->getValueType(0));
 }
 
+// This lowers an INSERT_SUBVECTOR by extracting the individual elements from
+// the small vector and inserting them into the big vector. That is better than
+// the default expansion of doing it via a stack slot. Even though the use of
+// the stack slot would be optimized away afterwards, the stack slot itself
+// remains.
+SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDValue Vec = Op.getOperand(0);
+  SDValue Ins = Op.getOperand(1);
+  SDValue Idx = Op.getOperand(2);
+  EVT VecVT = Vec.getValueType();
+  EVT InsVT = Ins.getValueType();
+  EVT EltVT = VecVT.getVectorElementType();
+  unsigned InsNumElts = InsVT.getVectorNumElements();
+  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+  SDLoc SL(Op);
+
+  for (unsigned I = 0; I != InsNumElts; ++I) {
+    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
+                              DAG.getConstant(I, SL, MVT::i32));
+    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
+                      DAG.getConstant(IdxVal + I, SL, MVT::i32));
+  }
+  return Vec;
+}
+
 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
   SDValue Vec = Op.getOperand(0);
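For contrast, a hedged sketch of what the generic "expand" path produces
before optimization: LegalizeDAG materializes the insert through a stack
temporary (the frame object that used to be left behind), roughly:

    slot   = CreateStackTemporary(v4i32)   ; the unused stack slot
    store Vec to slot
    store Ins to slot + EltSize * Idx
    result = load v4i32 from slot

The loads and stores fold away later, but the frame object allocated for the
temporary survives, which is what the test below checks via ScratchSize.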
llvm/lib/Target/AMDGPU/SIISelLowering.h:

@@ -121,6 +121,7 @@ private:
                                 SelectionDAG &DAG) const;
 
   SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
llvm/test/CodeGen/AMDGPU (new test; exact file name not shown):

@@ -0,0 +1,32 @@
+; RUN: llc -mtriple amdgcn-amd-- -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; Before the fix that this test was committed with, this code would leave
+; an unused stack slot, causing ScratchSize to be non-zero.
+
+; GCN-LABEL: store_v3i32:
+; GCN: ds_read_b64
+; GCN: ds_read_b32
+; GCN: ds_write_b32
+; GCN: ds_write_b64
+; GCN: ScratchSize: 0
+define amdgpu_kernel void @store_v3i32(<3 x i32> addrspace(3)* %out, <3 x i32> %a) nounwind {
+  %val = load <3 x i32>, <3 x i32> addrspace(3)* %out
+  %val.1 = add <3 x i32> %a, %val
+  store <3 x i32> %val.1, <3 x i32> addrspace(3)* %out, align 16
+  ret void
+}
+
+; GCN-LABEL: store_v5i32:
+; GCN: ds_read2_b64
+; GCN: ds_read_b32
+; GCN: ds_write_b32
+; GCN: ds_write2_b64
+; GCN: ScratchSize: 0
+define amdgpu_kernel void @store_v5i32(<5 x i32> addrspace(3)* %out, <5 x i32> %a) nounwind {
+  %val = load <5 x i32>, <5 x i32> addrspace(3)* %out
+  %val.1 = add <5 x i32> %a, %val
+  store <5 x i32> %val.1, <5 x i32> addrspace(3)* %out, align 16
+  ret void
+}
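Once checked in, the test can be run on its own with llvm-lit; the path below
is hypothetical, since the excerpt does not show the new file's name:

    llvm-lit -v llvm/test/CodeGen/AMDGPU/<new-test-file>.ll   # hypothetical path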