forked from OSchip/llvm-project
ARM: use natural LLVM IR for vshll instructions
Similarly to the vshrn instructions, these are simple zext/sext + shift operations. Using normal LLVM IR should allow for better code, and more sharing with the AArch64 backend.

llvm-svn: 201093
This commit is contained in:
parent 78c135942d
commit b0430415e6
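For reference, the canonical form the backend now matches is an extend followed by a vector shift-left, as exercised by the updated tests below. A minimal sketch of the signed, in-range case (the function name is illustrative, not part of the commit):

define <8 x i16> @vshll_s8_example(<8 x i8> %a) nounwind {
  ; sign-extend to the wide element type, then shift left by an in-range count (< 8)
  %ext = sext <8 x i8> %a to <8 x i16>
  ; the ARM backend selects this pair as a single vshll.s8 with #7
  %shl = shl <8 x i16> %ext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
  ret <8 x i16> %shl
}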
@@ -287,8 +287,6 @@ def int_arm_neon_vpminu : Neon_2Arg_Intrinsic;
 // Vector Shift.
 def int_arm_neon_vshifts : Neon_2Arg_Intrinsic;
 def int_arm_neon_vshiftu : Neon_2Arg_Intrinsic;
-def int_arm_neon_vshiftls : Neon_2Arg_Long_Intrinsic;
-def int_arm_neon_vshiftlu : Neon_2Arg_Long_Intrinsic;
 
 // Vector Rounding Shift.
 def int_arm_neon_vrshifts : Neon_2Arg_Intrinsic;
@@ -1078,9 +1078,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
   case ARMISD::VSHL: return "ARMISD::VSHL";
   case ARMISD::VSHRs: return "ARMISD::VSHRs";
   case ARMISD::VSHRu: return "ARMISD::VSHRu";
-  case ARMISD::VSHLLs: return "ARMISD::VSHLLs";
-  case ARMISD::VSHLLu: return "ARMISD::VSHLLu";
-  case ARMISD::VSHLLi: return "ARMISD::VSHLLi";
   case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
   case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
   case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
@@ -9714,8 +9711,6 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
   // loads from a constant pool.
   case Intrinsic::arm_neon_vshifts:
   case Intrinsic::arm_neon_vshiftu:
-  case Intrinsic::arm_neon_vshiftls:
-  case Intrinsic::arm_neon_vshiftlu:
   case Intrinsic::arm_neon_vrshifts:
   case Intrinsic::arm_neon_vrshiftu:
   case Intrinsic::arm_neon_vrshiftn:
@@ -9746,12 +9741,6 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
     }
     return SDValue();
 
-  case Intrinsic::arm_neon_vshiftls:
-  case Intrinsic::arm_neon_vshiftlu:
-    if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
-      break;
-    llvm_unreachable("invalid shift count for vshll intrinsic");
-
   case Intrinsic::arm_neon_vrshifts:
   case Intrinsic::arm_neon_vrshiftu:
     if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
@@ -9791,14 +9780,6 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
   case Intrinsic::arm_neon_vshiftu:
     // Opcode already set above.
     break;
-  case Intrinsic::arm_neon_vshiftls:
-  case Intrinsic::arm_neon_vshiftlu:
-    if (Cnt == VT.getVectorElementType().getSizeInBits())
-      VShiftOpc = ARMISD::VSHLLi;
-    else
-      VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
-                   ARMISD::VSHLLs : ARMISD::VSHLLu);
-    break;
   case Intrinsic::arm_neon_vrshifts:
     VShiftOpc = ARMISD::VRSHRs; break;
   case Intrinsic::arm_neon_vrshiftu:
@@ -113,9 +113,6 @@ namespace llvm {
      VSHL,   // ...left
      VSHRs,  // ...right (signed)
      VSHRu,  // ...right (unsigned)
-     VSHLLs, // ...left long (signed)
-     VSHLLu, // ...left long (unsigned)
-     VSHLLi, // ...left long (with maximum shift count)
 
      // Vector rounding shift by immediate:
      VRSHRs, // ...right (signed)
@@ -466,9 +466,6 @@ def SDTARMVSHINS : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
 def NEONvshl  : SDNode<"ARMISD::VSHL", SDTARMVSH>;
 def NEONvshrs : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
 def NEONvshru : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
-def NEONvshlls : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
-def NEONvshllu : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
-def NEONvshlli : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
 def NEONvshrn : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;
 
 def NEONvrshrs : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
@@ -3038,12 +3035,12 @@ class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
 // Long shift by immediate.
 class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
              string OpcodeStr, string Dt,
-             ValueType ResTy, ValueType OpTy, Operand ImmTy, SDNode OpNode>
+             ValueType ResTy, ValueType OpTy, Operand ImmTy,
+             SDPatternOperator OpNode>
   : N2VImm<op24, op23, op11_8, op7, op6, op4,
            (outs QPR:$Vd), (ins DPR:$Vm, ImmTy:$SIMM), N2RegVShLFrm,
            IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
-           [(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm),
-                                         (i32 imm:$SIMM))))]>;
+           [(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm), ImmTy:$SIMM)))]>;
 
 // Narrow shift by immediate.
 class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
@@ -3942,7 +3939,8 @@ multiclass N2VShInsR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
 // Neon Shift Long operations,
 //   element sizes of 8, 16, 32 bits:
 multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
-                      bit op4, string OpcodeStr, string Dt, SDNode OpNode> {
+                      bit op4, string OpcodeStr, string Dt,
+                      SDPatternOperator OpNode> {
   def v8i16 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
                      OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, imm1_7, OpNode> {
     let Inst{21-19} = 0b001; // imm6 = 001xxx
@@ -4947,24 +4945,39 @@ defm VSHRu : N2VShR_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u", "VSHRu",
                          NEONvshru>;
 
 // VSHLL : Vector Shift Left Long
-defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
-defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u", NEONvshllu>;
+defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s",
+  PatFrag<(ops node:$LHS, node:$RHS), (NEONvshl (sext node:$LHS), node:$RHS)>>;
+defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u",
+  PatFrag<(ops node:$LHS, node:$RHS), (NEONvshl (zext node:$LHS), node:$RHS)>>;
 
 // VSHLL : Vector Shift Left Long (with maximum shift count)
 class N2VLShMax<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                 bit op6, bit op4, string OpcodeStr, string Dt, ValueType ResTy,
-                ValueType OpTy, Operand ImmTy, SDNode OpNode>
+                ValueType OpTy, Operand ImmTy>
   : N2VLSh<op24, op23, op11_8, op7, op6, op4, OpcodeStr, Dt,
-           ResTy, OpTy, ImmTy, OpNode> {
+           ResTy, OpTy, ImmTy, null_frag> {
   let Inst{21-16} = op21_16;
   let DecoderMethod = "DecodeVSHLMaxInstruction";
 }
 def VSHLLi8  : N2VLShMax<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll", "i8",
-                         v8i16, v8i8, imm8, NEONvshlli>;
+                         v8i16, v8i8, imm8>;
 def VSHLLi16 : N2VLShMax<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll", "i16",
-                         v4i32, v4i16, imm16, NEONvshlli>;
+                         v4i32, v4i16, imm16>;
 def VSHLLi32 : N2VLShMax<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll", "i32",
-                         v2i64, v2i32, imm32, NEONvshlli>;
+                         v2i64, v2i32, imm32>;
+
+def : Pat<(v8i16 (NEONvshl (zext (v8i8 DPR:$Rn)), (i32 8))),
+          (VSHLLi8 DPR:$Rn, 8)>;
+def : Pat<(v4i32 (NEONvshl (zext (v4i16 DPR:$Rn)), (i32 16))),
+          (VSHLLi16 DPR:$Rn, 16)>;
+def : Pat<(v2i64 (NEONvshl (zext (v2i32 DPR:$Rn)), (i32 32))),
+          (VSHLLi32 DPR:$Rn, 32)>;
+def : Pat<(v8i16 (NEONvshl (sext (v8i8 DPR:$Rn)), (i32 8))),
+          (VSHLLi8 DPR:$Rn, 8)>;
+def : Pat<(v4i32 (NEONvshl (sext (v4i16 DPR:$Rn)), (i32 16))),
+          (VSHLLi16 DPR:$Rn, 16)>;
+def : Pat<(v2i64 (NEONvshl (sext (v2i32 DPR:$Rn)), (i32 32))),
+          (VSHLLi32 DPR:$Rn, 32)>;
 
 // VSHRN : Vector Shift Right and Narrow
 defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
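When the shift count equals the element width, the explicit Pat records above pick the immediate form (vshll.i8/i16/i32); larger counts are not matched, so the extend and shift stay separate (vmovl + vshl), as the new tests check. A sketch of the maximum-count case (illustrative function name, mirroring the vshlli16 test):

define <4 x i32> @vshll_max_example(<4 x i16> %a) nounwind {
  ; count == element width (16): matched by the VSHLLi16 Pat, giving vshll.i16 with #16
  %ext = zext <4 x i16> %a to <4 x i32>
  %shl = shl <4 x i32> %ext, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %shl
}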
@@ -3,49 +3,55 @@
 define <8 x i16> @vshlls8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshlls8:
 ;CHECK: vshll.s8
   %tmp1 = load <8 x i8>* %A
-  %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
-  ret <8 x i16> %tmp2
+  %sext = sext <8 x i8> %tmp1 to <8 x i16>
+  %shift = shl <8 x i16> %sext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  ret <8 x i16> %shift
 }
 
 define <4 x i32> @vshlls16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshlls16:
 ;CHECK: vshll.s16
   %tmp1 = load <4 x i16>* %A
-  %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
-  ret <4 x i32> %tmp2
+  %sext = sext <4 x i16> %tmp1 to <4 x i32>
+  %shift = shl <4 x i32> %sext, <i32 15, i32 15, i32 15, i32 15>
+  ret <4 x i32> %shift
 }
 
 define <2 x i64> @vshlls32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshlls32:
 ;CHECK: vshll.s32
   %tmp1 = load <2 x i32>* %A
-  %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
-  ret <2 x i64> %tmp2
+  %sext = sext <2 x i32> %tmp1 to <2 x i64>
+  %shift = shl <2 x i64> %sext, <i64 31, i64 31>
+  ret <2 x i64> %shift
 }
 
 define <8 x i16> @vshllu8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshllu8:
 ;CHECK: vshll.u8
   %tmp1 = load <8 x i8>* %A
-  %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
-  ret <8 x i16> %tmp2
+  %zext = zext <8 x i8> %tmp1 to <8 x i16>
+  %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  ret <8 x i16> %shift
 }
 
 define <4 x i32> @vshllu16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshllu16:
 ;CHECK: vshll.u16
   %tmp1 = load <4 x i16>* %A
-  %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
-  ret <4 x i32> %tmp2
+  %zext = zext <4 x i16> %tmp1 to <4 x i32>
+  %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>
+  ret <4 x i32> %shift
 }
 
 define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshllu32:
 ;CHECK: vshll.u32
   %tmp1 = load <2 x i32>* %A
-  %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
-  ret <2 x i64> %tmp2
+  %zext = zext <2 x i32> %tmp1 to <2 x i64>
+  %shift = shl <2 x i64> %zext, <i64 31, i64 31>
+  ret <2 x i64> %shift
 }
 
 ; The following tests use the maximum shift count, so the signedness is
@@ -53,31 +59,58 @@ define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
 define <8 x i16> @vshlli8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshlli8:
 ;CHECK: vshll.i8
   %tmp1 = load <8 x i8>* %A
-  %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >)
-  ret <8 x i16> %tmp2
+  %sext = sext <8 x i8> %tmp1 to <8 x i16>
+  %shift = shl <8 x i16> %sext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  ret <8 x i16> %shift
 }
 
 define <4 x i32> @vshlli16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshlli16:
 ;CHECK: vshll.i16
   %tmp1 = load <4 x i16>* %A
-  %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 16, i16 16, i16 16, i16 16 >)
-  ret <4 x i32> %tmp2
+  %zext = zext <4 x i16> %tmp1 to <4 x i32>
+  %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>
+  ret <4 x i32> %shift
 }
 
 define <2 x i64> @vshlli32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshlli32:
 ;CHECK: vshll.i32
   %tmp1 = load <2 x i32>* %A
-  %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 32, i32 32 >)
-  ret <2 x i64> %tmp2
+  %zext = zext <2 x i32> %tmp1 to <2 x i64>
+  %shift = shl <2 x i64> %zext, <i64 32, i64 32>
+  ret <2 x i64> %shift
 }
 
-declare <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
+; And these have a shift just out of range so separate vmovl and vshl
+; instructions are needed.
+define <8 x i16> @vshllu8_bad(<8 x i8>* %A) nounwind {
+; CHECK-LABEL: vshllu8_bad:
+; CHECK: vmovl.u8
+; CHECK: vshl.i16
+  %tmp1 = load <8 x i8>* %A
+  %zext = zext <8 x i8> %tmp1 to <8 x i16>
+  %shift = shl <8 x i16> %zext, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
+  ret <8 x i16> %shift
+}
+
+define <4 x i32> @vshlls16_bad(<4 x i16>* %A) nounwind {
+; CHECK-LABEL: vshlls16_bad:
+; CHECK: vmovl.s16
+; CHECK: vshl.i32
+  %tmp1 = load <4 x i16>* %A
+  %sext = sext <4 x i16> %tmp1 to <4 x i32>
+  %shift = shl <4 x i32> %sext, <i32 17, i32 17, i32 17, i32 17>
+  ret <4 x i32> %shift
+}
+
+define <2 x i64> @vshllu32_bad(<2 x i32>* %A) nounwind {
+; CHECK-LABEL: vshllu32_bad:
+; CHECK: vmovl.u32
+; CHECK: vshl.i64
+  %tmp1 = load <2 x i32>* %A
+  %zext = zext <2 x i32> %tmp1 to <2 x i64>
+  %shift = shl <2 x i64> %zext, <i64 33, i64 33>
+  ret <2 x i64> %shift
+}