From 84e7967fae5faae3871a503f07914559a7d16d1c Mon Sep 17 00:00:00 2001
From: Bob Wilson <bob.wilson@apple.com>
Date: Fri, 9 Oct 2009 00:01:36 +0000
Subject: [PATCH] Add codegen support for NEON vst4lane intrinsics with
 128-bit vectors.

llvm-svn: 83600
---
 llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp  | 62 ++++++++++++++++++++----
 llvm/lib/Target/ARM/ARMInstrNEON.td      | 17 +++++--
 llvm/lib/Target/ARM/NEONPreAllocPass.cpp | 16 ++++++
 llvm/test/CodeGen/ARM/vstlane.ll         | 28 +++++++++++
 4 files changed, 109 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index fd5aac977b99..be3f343b68f3 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2025,19 +2025,61 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
     SDValue MemAddr, MemUpdate, MemOpc;
     if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
       return NULL;
-    switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
+    VT = N->getOperand(3).getValueType();
+    if (VT.is64BitVector()) {
+      switch (VT.getSimpleVT().SimpleTy) {
+      default: llvm_unreachable("unhandled vst4lane type");
+      case MVT::v8i8:  Opc = ARM::VST4LNd8; break;
+      case MVT::v4i16: Opc = ARM::VST4LNd16; break;
+      case MVT::v2f32:
+      case MVT::v2i32: Opc = ARM::VST4LNd32; break;
+      }
+      SDValue Chain = N->getOperand(0);
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                              N->getOperand(3), N->getOperand(4),
+                              N->getOperand(5), N->getOperand(6),
+                              N->getOperand(7), Chain };
+      return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 9);
+    }
+    // Quad registers are handled by extracting subregs and then doing
+    // the store.
+    EVT RegVT;
+    unsigned Opc2 = 0;
+    switch (VT.getSimpleVT().SimpleTy) {
     default: llvm_unreachable("unhandled vst4lane type");
-    case MVT::v8i8:  Opc = ARM::VST4LNd8; break;
-    case MVT::v4i16: Opc = ARM::VST4LNd16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VST4LNd32; break;
+    case MVT::v8i16:
+      Opc = ARM::VST4LNq16a;
+      Opc2 = ARM::VST4LNq16b;
+      RegVT = MVT::v4i16;
+      break;
+    case MVT::v4f32:
+      Opc = ARM::VST4LNq32a;
+      Opc2 = ARM::VST4LNq32b;
+      RegVT = MVT::v2f32;
+      break;
+    case MVT::v4i32:
+      Opc = ARM::VST4LNq32a;
+      Opc2 = ARM::VST4LNq32b;
+      RegVT = MVT::v2i32;
+      break;
     }
     SDValue Chain = N->getOperand(0);
-    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                            N->getOperand(3), N->getOperand(4),
-                            N->getOperand(5), N->getOperand(6),
-                            N->getOperand(7), Chain };
-    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 9);
+    unsigned Lane = cast<ConstantSDNode>(N->getOperand(7))->getZExtValue();
+    unsigned NumElts = RegVT.getVectorNumElements();
+    int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+
+    SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                N->getOperand(3));
+    SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                N->getOperand(4));
+    SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                N->getOperand(5));
+    SDValue D3 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                N->getOperand(6));
+    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2, D3,
+                            getI32Imm(Lane % NumElts), Chain };
+    return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
+                                  dl, MVT::Other, Ops, 9);
   }
   }
   }
diff --git a/llvm/lib/Target/ARM/ARMInstrNEON.td b/llvm/lib/Target/ARM/ARMInstrNEON.td
index 758e781b9767..cd370aa97adb 100644
--- a/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -489,7 +489,7 @@ def VST3LNq16b: VST3LN<0b0110, "vst3.16">;
 def VST3LNq32b: VST3LN<0b1010, "vst3.32">;
 
 // VST4LN : Vector Store (single 4-element structure from one lane)
-class VST4LND<bits<4> op11_8, string OpcodeStr>
+class VST4LN<bits<4> op11_8, string OpcodeStr>
   : NLdSt<1,0b00,op11_8,0b0000, (outs),
           (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
            nohash_imm:$lane), IIC_VST,
@@ -497,9 +497,18 @@ class VST4LND<bits<4> op11_8, string OpcodeStr>
           "\t\\{$src1[$lane],$src2[$lane],$src3[$lane],$src4[$lane]\\}, $addr"),
           "", []>;
 
-def VST4LNd8  : VST4LND<0b0011, "vst4.8">;
-def VST4LNd16 : VST4LND<0b0111, "vst4.16">;
-def VST4LNd32 : VST4LND<0b1011, "vst4.32">;
+def VST4LNd8  : VST4LN<0b0011, "vst4.8">;
+def VST4LNd16 : VST4LN<0b0111, "vst4.16">;
+def VST4LNd32 : VST4LN<0b1011, "vst4.32">;
+
+// vst4 to double-spaced even registers.
+def VST4LNq16a: VST4LN<0b0111, "vst4.16">;
+def VST4LNq32a: VST4LN<0b1011, "vst4.32">;
+
+// vst4 to double-spaced odd registers.
+def VST4LNq16b: VST4LN<0b0111, "vst4.16">;
+def VST4LNq32b: VST4LN<0b1011, "vst4.32">;
+
 } // mayStore = 1, hasExtraSrcRegAllocReq = 1
diff --git a/llvm/lib/Target/ARM/NEONPreAllocPass.cpp b/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
index 4c3a8df33e11..821b872ac7cd 100644
--- a/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
+++ b/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
@@ -278,6 +278,22 @@ static bool isNEONMultiRegOp(int Opcode, unsigned &FirstOpnd, unsigned &NumRegs,
     Stride = 2;
     return true;
 
+  case ARM::VST4LNq16a:
+  case ARM::VST4LNq32a:
+    FirstOpnd = 3;
+    NumRegs = 4;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VST4LNq16b:
+  case ARM::VST4LNq32b:
+    FirstOpnd = 3;
+    NumRegs = 4;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VTBL2:
     FirstOpnd = 1;
     NumRegs = 2;
diff --git a/llvm/test/CodeGen/ARM/vstlane.ll b/llvm/test/CodeGen/ARM/vstlane.ll
index 19067056f68b..3bfb14f17b77 100644
--- a/llvm/test/CodeGen/ARM/vstlane.ll
+++ b/llvm/test/CodeGen/ARM/vstlane.ll
@@ -163,7 +163,35 @@ define void @vst4lanef(float* %A, <2 x float>* %B) nounwind {
 	ret void
 }
 
+define void @vst4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vst4laneQi16:
+;CHECK: vst4.16
+	%tmp1 = load <8 x i16>* %B
+	call void @llvm.arm.neon.vst4lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7)
+	ret void
+}
+
+define void @vst4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vst4laneQi32:
+;CHECK: vst4.32
+	%tmp1 = load <4 x i32>* %B
+	call void @llvm.arm.neon.vst4lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
+	ret void
+}
+
+define void @vst4laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vst4laneQf:
+;CHECK: vst4.32
+	%tmp1 = load <4 x float>* %B
+	call void @llvm.arm.neon.vst4lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	ret void
+}
+
 declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+
+declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind
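
The `Lane < NumElts` test in the ARMISelDAGToDAG.cpp hunk is the heart of the quad-register lowering: a vst4 lane store from Q registers is rewritten as a lane store from four D sub-registers, using the even ("a") opcodes and DSUBREG_0 when the lane lives in the low halves, or the odd ("b") opcodes and DSUBREG_1 when it lives in the high halves, with the lane immediate renumbered by `Lane % NumElts`. Below is a minimal standalone C++ sketch of just that rule; the names `QLaneSplit` and `splitQLane` are illustrative and do not appear in the patch:

  #include <cassert>
  #include <cstdio>

  // Hypothetical helper, not from the patch: which half of the Q register
  // holds the lane, and where the lane lands within the corresponding
  // 64-bit D sub-register.
  struct QLaneSplit {
    bool OddHalf;    // true -> "b" opcode (DSUBREG_1); false -> "a" (DSUBREG_0)
    unsigned DLane;  // lane immediate for the D-register instruction
  };

  // NumDElts is the element count of one 64-bit half, e.g. 4 for v8i16.
  static QLaneSplit splitQLane(unsigned Lane, unsigned NumDElts) {
    assert(Lane < 2 * NumDElts && "lane out of range for a Q register");
    return QLaneSplit{Lane >= NumDElts, Lane % NumDElts};
  }

  int main() {
    // Lane 7 of a v8i16 operand, as in the vst4laneQi16 test above.
    QLaneSplit S = splitQLane(7, 4);
    printf("odd half: %d, D lane: %u\n", S.OddHalf ? 1 : 0, S.DLane);
    return 0;
  }

For the vst4laneQi16 test, this picks the odd-register opcode VST4LNq16b with a lane immediate of 3; the NEONPreAllocPass changes then force the four source registers into an odd-offset (Offset = 1), double-spaced (Stride = 2) D-register sequence to match.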