Add codegen support for NEON vst4lane intrinsics with 128-bit vectors.

llvm-svn: 83600
Bob Wilson 2009-10-09 00:01:36 +00:00
parent acdc3158b3
commit 84e7967fae
4 changed files with 109 additions and 14 deletions
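For reference, a source-level use that exercises the new path (illustrative C++, not part of the commit): clang lowers the vst4q_lane_* NEON intrinsics on 128-bit vector types to the @llvm.arm.neon.vst4lane.* intrinsics selected below.

// Illustrative only: a vst4 lane store from a 128-bit (Q register) type.
// vst4q_lane_u16 is lowered to @llvm.arm.neon.vst4lane.v8i16, which this
// commit teaches the ARM instruction selector to handle.
#include <arm_neon.h>

void store_lane7(uint16_t *p, uint16x8x4_t v) {
  vst4q_lane_u16(p, v, 7);  // lane 7 lives in the upper D subregisters
}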

@@ -2025,19 +2025,61 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
       SDValue MemAddr, MemUpdate, MemOpc;
       if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
         return NULL;
-      switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
+      VT = N->getOperand(3).getValueType();
+      if (VT.is64BitVector()) {
+        switch (VT.getSimpleVT().SimpleTy) {
+        default: llvm_unreachable("unhandled vst4lane type");
+        case MVT::v8i8:  Opc = ARM::VST4LNd8; break;
+        case MVT::v4i16: Opc = ARM::VST4LNd16; break;
+        case MVT::v2f32:
+        case MVT::v2i32: Opc = ARM::VST4LNd32; break;
+        }
+        SDValue Chain = N->getOperand(0);
+        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                                N->getOperand(3), N->getOperand(4),
+                                N->getOperand(5), N->getOperand(6),
+                                N->getOperand(7), Chain };
+        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 9);
+      }
+
+      // Quad registers are handled by extracting subregs and then doing
+      // the store.
+      EVT RegVT;
+      unsigned Opc2 = 0;
+      switch (VT.getSimpleVT().SimpleTy) {
       default: llvm_unreachable("unhandled vst4lane type");
-      case MVT::v8i8:  Opc = ARM::VST4LNd8; break;
-      case MVT::v4i16: Opc = ARM::VST4LNd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VST4LNd32; break;
+      case MVT::v8i16:
+        Opc = ARM::VST4LNq16a;
+        Opc2 = ARM::VST4LNq16b;
+        RegVT = MVT::v4i16;
+        break;
+      case MVT::v4f32:
+        Opc = ARM::VST4LNq32a;
+        Opc2 = ARM::VST4LNq32b;
+        RegVT = MVT::v2f32;
+        break;
+      case MVT::v4i32:
+        Opc = ARM::VST4LNq32a;
+        Opc2 = ARM::VST4LNq32b;
+        RegVT = MVT::v2i32;
+        break;
       }
       SDValue Chain = N->getOperand(0);
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                              N->getOperand(3), N->getOperand(4),
-                              N->getOperand(5), N->getOperand(6),
-                              N->getOperand(7), Chain };
-      return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 9);
+      unsigned Lane = cast<ConstantSDNode>(N->getOperand(7))->getZExtValue();
+      unsigned NumElts = RegVT.getVectorNumElements();
+      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(3));
+      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(4));
+      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(5));
+      SDValue D3 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(6));
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2, D3,
+                              getI32Imm(Lane % NumElts), Chain };
+      return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
+                                    dl, MVT::Other, Ops, 9);
     }
     }
   }
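The quad-register path above splits each 128-bit operand at the D-register boundary: a lane in the upper half selects the second D subregister and the odd-register ("b") opcode, and the lane index is reduced modulo the per-D-register element count. A minimal standalone sketch of that arithmetic (splitQuadLane is an illustrative name, not from the patch):

// Sketch of the Q-register lane split performed above.
#include <cassert>

struct LaneSplit {
  bool UpperHalf;    // true -> DSUBREG_1 and the "b" (odd-register) opcode
  unsigned SubLane;  // lane index within the selected D subregister
};

static LaneSplit splitQuadLane(unsigned Lane, unsigned NumElts) {
  assert(Lane < 2 * NumElts && "lane out of range for a Q register");
  LaneSplit S;
  S.UpperHalf = (Lane >= NumElts);  // mirrors (Lane < NumElts) ? Opc : Opc2
  S.SubLane = Lane % NumElts;       // mirrors getI32Imm(Lane % NumElts)
  return S;
}

// Example: v8i16 has 4 elements per D register, so lane 7 maps to
// DSUBREG_1, sub-lane 3, and VST4LNq16b.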

@@ -489,7 +489,7 @@ def VST3LNq16b: VST3LN<0b0110, "vst3.16">;
 def VST3LNq32b: VST3LN<0b1010, "vst3.32">;
 
 // VST4LN : Vector Store (single 4-element structure from one lane)
-class VST4LND<bits<4> op11_8, string OpcodeStr>
+class VST4LN<bits<4> op11_8, string OpcodeStr>
   : NLdSt<1,0b00,op11_8,0b0000, (outs),
           (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
            nohash_imm:$lane), IIC_VST,
@@ -497,9 +497,18 @@ class VST4LND<bits<4> op11_8, string OpcodeStr>
           "\t\\{$src1[$lane],$src2[$lane],$src3[$lane],$src4[$lane]\\}, $addr"),
           "", []>;
-def VST4LNd8  : VST4LND<0b0011, "vst4.8">;
-def VST4LNd16 : VST4LND<0b0111, "vst4.16">;
-def VST4LNd32 : VST4LND<0b1011, "vst4.32">;
+def VST4LNd8  : VST4LN<0b0011, "vst4.8">;
+def VST4LNd16 : VST4LN<0b0111, "vst4.16">;
+def VST4LNd32 : VST4LN<0b1011, "vst4.32">;
+
+// vst4 to double-spaced even registers.
+def VST4LNq16a: VST4LN<0b0111, "vst4.16">;
+def VST4LNq32a: VST4LN<0b1011, "vst4.32">;
+
+// vst4 to double-spaced odd registers.
+def VST4LNq16b: VST4LN<0b0111, "vst4.16">;
+def VST4LNq32b: VST4LN<0b1011, "vst4.32">;
+
 } // mayStore = 1, hasExtraSrcRegAllocReq = 1

@@ -278,6 +278,22 @@ static bool isNEONMultiRegOp(int Opcode, unsigned &FirstOpnd, unsigned &NumRegs,
     Stride = 2;
     return true;
 
+  case ARM::VST4LNq16a:
+  case ARM::VST4LNq32a:
+    FirstOpnd = 3;
+    NumRegs = 4;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VST4LNq16b:
+  case ARM::VST4LNq32b:
+    FirstOpnd = 3;
+    NumRegs = 4;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VTBL2:
     FirstOpnd = 1;
     NumRegs = 2;
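The new entries describe the register pattern required for the VST4LNq* source operands: four registers (NumRegs), double spaced (Stride 2), starting at an even D register for the "a" forms (Offset 0) or the next odd D register for the "b" forms (Offset 1). A small illustrative helper, not part of the pass, that expands such an entry relative to a hypothetical base register:

// Illustrative only: print the D-register sequence an (NumRegs, Offset,
// Stride) entry describes, starting from a hypothetical base register.
#include <cstdio>

static void printDRegPattern(unsigned NumRegs, unsigned Offset,
                             unsigned Stride, unsigned BaseDReg) {
  for (unsigned R = 0; R != NumRegs; ++R)
    std::printf("src%u -> d%u\n", R + 1, BaseDReg + Offset + R * Stride);
}

int main() {
  printDRegPattern(4, 0, 2, 0);  // VST4LNq16a / VST4LNq32a: d0, d2, d4, d6
  printDRegPattern(4, 1, 2, 0);  // VST4LNq16b / VST4LNq32b: d1, d3, d5, d7
  return 0;
}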

@@ -163,7 +163,35 @@ define void @vst4lanef(float* %A, <2 x float>* %B) nounwind {
 	ret void
 }
 
+define void @vst4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vst4laneQi16:
+;CHECK: vst4.16
+	%tmp1 = load <8 x i16>* %B
+	call void @llvm.arm.neon.vst4lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7)
+	ret void
+}
+
+define void @vst4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vst4laneQi32:
+;CHECK: vst4.32
+	%tmp1 = load <4 x i32>* %B
+	call void @llvm.arm.neon.vst4lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
+	ret void
+}
+
+define void @vst4laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vst4laneQf:
+;CHECK: vst4.32
+	%tmp1 = load <4 x float>* %B
+	call void @llvm.arm.neon.vst4lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	ret void
+}
+
 declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+
+declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind