From 6b2ea4a48f5c80d8397061140657dbd22682cc8b Mon Sep 17 00:00:00 2001 From: Renato Golin Date: Thu, 20 Dec 2012 13:52:11 +0000 Subject: [PATCH] Adding support for llvm.arm.neon.vaddl[su].* and llvm.arm.neon.vsub[su].* intrinsics. Patch by Pete Couperus llvm-svn: 170694 --- llvm/include/llvm/IntrinsicsARM.td | 4 ++ llvm/lib/Target/ARM/ARMISelLowering.cpp | 18 +++++++ llvm/lib/Target/ARM/ARMISelLowering.h | 8 ++++ llvm/lib/Target/ARM/ARMInstrNEON.td | 44 +++++++++++++++-- llvm/test/CodeGen/ARM/vadd.ll | 64 +++++++++++++++++++++++++ llvm/test/CodeGen/ARM/vsub.ll | 64 +++++++++++++++++++++++++ 6 files changed, 198 insertions(+), 4 deletions(-) diff --git a/llvm/include/llvm/IntrinsicsARM.td b/llvm/include/llvm/IntrinsicsARM.td index 93b1ae1dc887..6103f29cfce7 100644 --- a/llvm/include/llvm/IntrinsicsARM.td +++ b/llvm/include/llvm/IntrinsicsARM.td @@ -160,6 +160,8 @@ let Properties = [IntrNoMem, Commutative] in { def int_arm_neon_vqaddu : Neon_2Arg_Intrinsic; def int_arm_neon_vaddhn : Neon_2Arg_Narrow_Intrinsic; def int_arm_neon_vraddhn : Neon_2Arg_Narrow_Intrinsic; + def int_arm_neon_vaddls : Neon_2Arg_Long_Intrinsic; + def int_arm_neon_vaddlu : Neon_2Arg_Long_Intrinsic; // Vector Multiply. def int_arm_neon_vmulp : Neon_2Arg_Intrinsic; @@ -196,6 +198,8 @@ def int_arm_neon_vqsubs : Neon_2Arg_Intrinsic; def int_arm_neon_vqsubu : Neon_2Arg_Intrinsic; def int_arm_neon_vsubhn : Neon_2Arg_Narrow_Intrinsic; def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic; +def int_arm_neon_vsubls : Neon_2Arg_Long_Intrinsic; +def int_arm_neon_vsublu : Neon_2Arg_Long_Intrinsic; // Vector Absolute Compare. def int_arm_neon_vacged : Intrinsic<[llvm_v2i32_ty], diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 8034ce160436..1105f412cceb 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -1007,6 +1007,10 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { case ARMISD::VTBL2: return "ARMISD::VTBL2"; case ARMISD::VMULLs: return "ARMISD::VMULLs"; case ARMISD::VMULLu: return "ARMISD::VMULLu"; + case ARMISD::VADDLs: return "ARMISD::VADDLs"; + case ARMISD::VADDLu: return "ARMISD::VADDLu"; + case ARMISD::VSUBLs: return "ARMISD::VSUBLs"; + case ARMISD::VSUBLu: return "ARMISD::VSUBLu"; case ARMISD::UMLAL: return "ARMISD::UMLAL"; case ARMISD::SMLAL: return "ARMISD::SMLAL"; case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; @@ -2429,6 +2433,20 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), Op.getOperand(1), Op.getOperand(2)); } + case Intrinsic::arm_neon_vaddls: + case Intrinsic::arm_neon_vaddlu: { + unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vaddls) + ? ARMISD::VADDLs : ARMISD::VADDLu; + return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + } + case Intrinsic::arm_neon_vsubls: + case Intrinsic::arm_neon_vsublu: { + unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vsubls) + ? 
ARMISD::VSUBLs: ARMISD::VSUBLu; + return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + } } } diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h index 61649a0d4dc7..9cf835ad7b1a 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -174,6 +174,14 @@ namespace llvm { VMULLs, // ...signed VMULLu, // ...unsigned + // Vector add long: + VADDLs, // ...signed + VADDLu, // ...unsigned + + // Vector subtract long: + VSUBLs, // ...signed + VSUBLu, // ...unsigned + UMLAL, // 64bit Unsigned Accumulate Multiply SMLAL, // 64bit Signed Accumulate Multiply diff --git a/llvm/lib/Target/ARM/ARMInstrNEON.td b/llvm/lib/Target/ARM/ARMInstrNEON.td index 697a8d28c54d..c853a4522d82 100644 --- a/llvm/lib/Target/ARM/ARMInstrNEON.td +++ b/llvm/lib/Target/ARM/ARMInstrNEON.td @@ -534,10 +534,16 @@ def NEONzip : SDNode<"ARMISD::VZIP", SDTARMVSHUF2>; def NEONuzp : SDNode<"ARMISD::VUZP", SDTARMVSHUF2>; def NEONtrn : SDNode<"ARMISD::VTRN", SDTARMVSHUF2>; -def SDTARMVMULL : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>, - SDTCisSameAs<1, 2>]>; -def NEONvmulls : SDNode<"ARMISD::VMULLs", SDTARMVMULL>; -def NEONvmullu : SDNode<"ARMISD::VMULLu", SDTARMVMULL>; +def SDTARMVLONG2: SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>, + SDTCisSameAs<1, 2>]>; +def NEONvmulls : SDNode<"ARMISD::VMULLs", SDTARMVLONG2>; +def NEONvmullu : SDNode<"ARMISD::VMULLu", SDTARMVLONG2>; + +def NEONvaddls : SDNode<"ARMISD::VADDLs", SDTARMVLONG2>; +def NEONvaddlu : SDNode<"ARMISD::VADDLu", SDTARMVLONG2>; + +def NEONvsubls : SDNode<"ARMISD::VSUBLs", SDTARMVLONG2>; +def NEONvsublu : SDNode<"ARMISD::VSUBLu", SDTARMVLONG2>; def SDTARMFMAX : SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>]>; @@ -3940,6 +3946,21 @@ defm VADDLs : N3VLExt_QHS<0,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD, "vaddl", "s", add, sext, 1>; defm VADDLu : N3VLExt_QHS<1,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD, "vaddl", "u", add, zext, 1>; + +def : Pat<(v4i32 (NEONvaddlu (v4i16 DPR:$src1), (v4i16 DPR:$src2))), + (v4i32 (VADDLuv4i32 DPR:$src1, DPR:$src2))>; +def : Pat<(v8i16 (NEONvaddlu (v8i8 DPR:$src1), (v8i8 DPR:$src2))), + (v8i16 (VADDLuv8i16 DPR:$src1, DPR:$src2))>; +def : Pat<(v2i64 (NEONvaddlu (v2i32 DPR:$src1), (v2i32 DPR:$src2))), + (v2i64 (VADDLuv2i64 DPR:$src1, DPR:$src2))>; + +def : Pat<(v4i32 (NEONvaddls (v4i16 DPR:$src1), (v4i16 DPR:$src2))), + (v4i32 (VADDLsv4i32 DPR:$src1, DPR:$src2))>; +def : Pat<(v8i16 (NEONvaddls (v8i8 DPR:$src1), (v8i8 DPR:$src2))), + (v8i16 (VADDLsv8i16 DPR:$src1, DPR:$src2))>; +def : Pat<(v2i64 (NEONvaddls (v2i32 DPR:$src1), (v2i32 DPR:$src2))), + (v2i64 (VADDLsv2i64 DPR:$src1, DPR:$src2))>; + // VADDW : Vector Add Wide (Q = Q + D) defm VADDWs : N3VW_QHS<0,1,0b0001,0, "vaddw", "s", add, sext, 0>; defm VADDWu : N3VW_QHS<1,1,0b0001,0, "vaddw", "u", add, zext, 0>; @@ -4230,6 +4251,21 @@ defm VSUBLs : N3VLExt_QHS<0,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD, "vsubl", "s", sub, sext, 0>; defm VSUBLu : N3VLExt_QHS<1,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD, "vsubl", "u", sub, zext, 0>; + +def : Pat<(v4i32 (NEONvsublu (v4i16 DPR:$src1), (v4i16 DPR:$src2))), + (v4i32 (VSUBLuv4i32 DPR:$src1, DPR:$src2))>; +def : Pat<(v8i16 (NEONvsublu (v8i8 DPR:$src1), (v8i8 DPR:$src2))), + (v8i16 (VSUBLuv8i16 DPR:$src1, DPR:$src2))>; +def : Pat<(v2i64 (NEONvsublu (v2i32 DPR:$src1), (v2i32 DPR:$src2))), + (v2i64 (VSUBLuv2i64 DPR:$src1, DPR:$src2))>; + +def : Pat<(v4i32 (NEONvsubls (v4i16 DPR:$src1), (v4i16 DPR:$src2))), + (v4i32 
(VSUBLsv4i32 DPR:$src1, DPR:$src2))>; +def : Pat<(v8i16 (NEONvsubls (v8i8 DPR:$src1), (v8i8 DPR:$src2))), + (v8i16 (VSUBLsv8i16 DPR:$src1, DPR:$src2))>; +def : Pat<(v2i64 (NEONvsubls (v2i32 DPR:$src1), (v2i32 DPR:$src2))), + (v2i64 (VSUBLsv2i64 DPR:$src1, DPR:$src2))>; + // VSUBW : Vector Subtract Wide (Q = Q - D) defm VSUBWs : N3VW_QHS<0,1,0b0011,0, "vsubw", "s", sub, sext, 0>; defm VSUBWu : N3VW_QHS<1,1,0b0011,0, "vsubw", "u", sub, zext, 0>; diff --git a/llvm/test/CodeGen/ARM/vadd.ll b/llvm/test/CodeGen/ARM/vadd.ll index a830e968ff78..1cb23c711650 100644 --- a/llvm/test/CodeGen/ARM/vadd.ll +++ b/llvm/test/CodeGen/ARM/vadd.ll @@ -185,6 +185,38 @@ define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ret <2 x i64> %tmp5 } +define <8 x i16> @vaddls8_intrinsic(<8 x i8>* %A, <8 x i8>* %B) nounwind { +;CHECK: vaddls8_intrinsic: +;CHECK: vaddl.s8 + %tmp1 = load <8 x i8>* %A + %tmp2 = load <8 x i8>* %B + %tmp3 = call <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) + ret <8 x i16> %tmp3 +} + +define <4 x i32> @vaddls16_intrinsic(<4 x i16>* %A, <4 x i16>* %B) nounwind { +;CHECK: vaddls16_intrinsic: +;CHECK: vaddl.s16 + %tmp1 = load <4 x i16>* %A + %tmp2 = load <4 x i16>* %B + %tmp3 = call <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) + ret <4 x i32> %tmp3 +} + +define <2 x i64> @vaddls32_intrinsic(<2 x i32>* %A, <2 x i32>* %B) nounwind { +;CHECK: vaddls32_intrinsic: +;CHECK: vaddl.s32 + %tmp1 = load <2 x i32>* %A + %tmp2 = load <2 x i32>* %B + %tmp3 = call <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) + ret <2 x i64> %tmp3 +} + +declare <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone +declare <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone +declare <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone + + define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK: vaddlu8: ;CHECK: vaddl.u8 @@ -218,6 +250,38 @@ define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ret <2 x i64> %tmp5 } +define <8 x i16> @vaddlu8_intrinsic(<8 x i8>* %A, <8 x i8>* %B) nounwind { +;CHECK: vaddlu8_intrinsic: +;CHECK: vaddl.u8 + %tmp1 = load <8 x i8>* %A + %tmp2 = load <8 x i8>* %B + %tmp3 = call <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) + ret <8 x i16> %tmp3 +} + +define <4 x i32> @vaddlu16_intrinsic(<4 x i16>* %A, <4 x i16>* %B) nounwind { +;CHECK: vaddlu16_intrinsic: +;CHECK: vaddl.u16 + %tmp1 = load <4 x i16>* %A + %tmp2 = load <4 x i16>* %B + %tmp3 = call <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) + ret <4 x i32> %tmp3 +} + +define <2 x i64> @vaddlu32_intrinsic(<2 x i32>* %A, <2 x i32>* %B) nounwind { +;CHECK: vaddlu32_intrinsic: +;CHECK: vaddl.u32 + %tmp1 = load <2 x i32>* %A + %tmp2 = load <2 x i32>* %B + %tmp3 = call <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) + ret <2 x i64> %tmp3 +} + +declare <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone +declare <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone +declare <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone + + define <8 x i16> @vaddws8(<8 x i16>* %A, <8 x i8>* %B) nounwind { ;CHECK: vaddws8: ;CHECK: vaddw.s8 diff --git a/llvm/test/CodeGen/ARM/vsub.ll b/llvm/test/CodeGen/ARM/vsub.ll index df77bb31fc8b..f64b897598b3 100644 --- a/llvm/test/CodeGen/ARM/vsub.ll +++ b/llvm/test/CodeGen/ARM/vsub.ll @@ -185,6 
+185,38 @@ define <2 x i64> @vsubls32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ret <2 x i64> %tmp5 } +define <8 x i16> @vsubls8_intrinsic(<8 x i8>* %A, <8 x i8>* %B) nounwind { +;CHECK: vsubls8_intrinsic: +;CHECK: vsubl.s8 + %tmp1 = load <8 x i8>* %A + %tmp2 = load <8 x i8>* %B + %tmp3 = call <8 x i16> @llvm.arm.neon.vsubls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) + ret <8 x i16> %tmp3 +} + +define <4 x i32> @vsubls16_intrinsic(<4 x i16>* %A, <4 x i16>* %B) nounwind { +;CHECK: vsubls16_intrinsic: +;CHECK: vsubl.s16 + %tmp1 = load <4 x i16>* %A + %tmp2 = load <4 x i16>* %B + %tmp3 = call <4 x i32> @llvm.arm.neon.vsubls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) + ret <4 x i32> %tmp3 +} + +define <2 x i64> @vsubls32_intrinsic(<2 x i32>* %A, <2 x i32>* %B) nounwind { +;CHECK: vsubls32_intrinsic: +;CHECK: vsubl.s32 + %tmp1 = load <2 x i32>* %A + %tmp2 = load <2 x i32>* %B + %tmp3 = call <2 x i64> @llvm.arm.neon.vsubls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) + ret <2 x i64> %tmp3 +} + +declare <8 x i16> @llvm.arm.neon.vsubls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone +declare <4 x i32> @llvm.arm.neon.vsubls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone +declare <2 x i64> @llvm.arm.neon.vsubls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone + + define <8 x i16> @vsublu8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK: vsublu8: ;CHECK: vsubl.u8 @@ -218,6 +250,38 @@ define <2 x i64> @vsublu32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ret <2 x i64> %tmp5 } +define <8 x i16> @vsublu8_intrinsic(<8 x i8>* %A, <8 x i8>* %B) nounwind { +;CHECK: vsublu8_intrinsic: +;CHECK: vsubl.u8 + %tmp1 = load <8 x i8>* %A + %tmp2 = load <8 x i8>* %B + %tmp3 = call <8 x i16> @llvm.arm.neon.vsublu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) + ret <8 x i16> %tmp3 +} + +define <4 x i32> @vsublu16_intrinsic(<4 x i16>* %A, <4 x i16>* %B) nounwind { +;CHECK: vsublu16_intrinsic: +;CHECK: vsubl.u16 + %tmp1 = load <4 x i16>* %A + %tmp2 = load <4 x i16>* %B + %tmp3 = call <4 x i32> @llvm.arm.neon.vsublu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) + ret <4 x i32> %tmp3 +} + +define <2 x i64> @vsublu32_intrinsic(<2 x i32>* %A, <2 x i32>* %B) nounwind { +;CHECK: vsublu32_intrinsic: +;CHECK: vsubl.u32 + %tmp1 = load <2 x i32>* %A + %tmp2 = load <2 x i32>* %B + %tmp3 = call <2 x i64> @llvm.arm.neon.vsublu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) + ret <2 x i64> %tmp3 +} + +declare <8 x i16> @llvm.arm.neon.vsublu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone +declare <4 x i32> @llvm.arm.neon.vsublu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone +declare <2 x i64> @llvm.arm.neon.vsublu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone + + define <8 x i16> @vsubws8(<8 x i16>* %A, <8 x i8>* %B) nounwind { ;CHECK: vsubws8: ;CHECK: vsubw.s8
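Usage sketch (not part of the patch; the function names and llc command line below are illustrative assumptions): the new llvm.arm.neon.vaddls.v4i32 intrinsic widens each <4 x i16> lane to i32 before adding, so it is semantically equivalent to the sext-then-add form that the pre-existing VADDLs patterns already selected. With this patch applied, both functions below should compile to a single vaddl.s16 when built with something like "llc -march=arm -mattr=+neon".

; Two equivalent ways to request a signed widening add of <4 x i16> vectors.
define <4 x i32> @widen_add_intrinsic(<4 x i16> %a, <4 x i16> %b) nounwind {
  ; Direct use of the intrinsic supported by this patch.
  %r = call <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16> %a, <4 x i16> %b)
  ret <4 x i32> %r
}

define <4 x i32> @widen_add_expanded(<4 x i16> %a, <4 x i16> %b) nounwind {
  ; Sign-extend both operands and add; matched by the existing VADDLs patterns.
  %as = sext <4 x i16> %a to <4 x i32>
  %bs = sext <4 x i16> %b to <4 x i32>
  %r = add <4 x i32> %as, %bs
  ret <4 x i32> %r
}

declare <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone

The unsigned variants (llvm.arm.neon.vaddlu.*) correspond to zext instead of sext, and the vsubls/vsublu intrinsics to the matching widening subtractions.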