[mips][msa] Implemented copy_[us].d intrinsic.

These intrinsics are lowered into equivalent copy_s.w instructions during
legalization when i64 is not a legal type (i.e. when the general-purpose
registers are 32 bits wide).

llvm-svn: 191518
Daniel Sanders 2013-09-27 13:04:21 +00:00
parent 51287b9355
commit 7f3d946fb7
4 changed files with 67 additions and 0 deletions
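For orientation, here is a minimal C-level sketch of how the new intrinsics are reached from source. The vector typedef, function name, and build flags are assumptions for illustration only; clang maps the builtin to llvm.mips.copy.u.d through the GCCBuiltin entries added below.

// Sketch only: assumes a MIPS target with MSA enabled (e.g. -mmsa).
typedef unsigned long long v2u64 __attribute__((vector_size(16)));

unsigned long long extract_element1(v2u64 v) {
  // __builtin_msa_copy_u_d maps onto llvm.mips.copy.u.d; the element index
  // must be a constant. On a 32-bit register file the result is produced by
  // two copy_s.w instructions, as checked by the tests below.
  return __builtin_msa_copy_u_d(v, 1);
}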

@@ -780,6 +780,8 @@ def int_mips_copy_s_h : GCCBuiltin<"__builtin_msa_copy_s_h">,
Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_s_w : GCCBuiltin<"__builtin_msa_copy_s_w">,
Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_s_d : GCCBuiltin<"__builtin_msa_copy_s_d">,
Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_u_b : GCCBuiltin<"__builtin_msa_copy_u_b">,
Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -787,6 +789,8 @@ def int_mips_copy_u_h : GCCBuiltin<"__builtin_msa_copy_u_h">,
Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_u_w : GCCBuiltin<"__builtin_msa_copy_u_w">,
Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_u_d : GCCBuiltin<"__builtin_msa_copy_u_d">,
Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_ctcmsa : GCCBuiltin<"__builtin_msa_ctcmsa">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>;

@@ -32,3 +32,8 @@ ilvr.d, ilvod.d, pckod.d:
splati.w:
It is not possible to emit splati.w since shf.w covers the same cases.
shf.w will be emitted instead.
copy_s.w:
On MIPS32, the copy_u.d intrinsic will emit this instruction instead of
copy_u.w. This is semantically equivalent since the general-purpose
register file is 32 bits wide.
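The equivalence noted above can be checked with a small host-side sketch (illustrative only, not part of the patch): once the result lands in a register that is exactly 32 bits wide, sign-extension and zero-extension of a 32-bit element are indistinguishable.

#include <cassert>
#include <cstdint>

// Illustrative only: models why copy_s.w may stand in for copy_u.w when the
// general-purpose registers are 32 bits wide.
int main() {
  uint32_t elt = 0x80000001u;                  // 32-bit element with the sign bit set
  uint32_t via_sext = (uint32_t)(int32_t)elt;  // conceptual copy_s.w result
  uint32_t via_zext = elt;                     // conceptual copy_u.w result
  assert(via_sext == via_zext);                // identical bits in a 32-bit register
  return 0;
}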

@@ -1242,10 +1242,26 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_copy_s_h:
case Intrinsic::mips_copy_s_w:
return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
case Intrinsic::mips_copy_s_d:
// Don't lower directly into VEXTRACT_SEXT_ELT since i64 might be illegal.
// Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
// legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
Op->getOperand(1), Op->getOperand(2));
case Intrinsic::mips_copy_u_b:
case Intrinsic::mips_copy_u_h:
case Intrinsic::mips_copy_u_w:
return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
case Intrinsic::mips_copy_u_d:
// Don't lower directly into VEXTRACT_ZEXT_ELT since i64 might be illegal.
// Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
// legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
//
// Note: When i64 is illegal, this results in copy_s.w instructions instead
// of copy_u.w instructions. This makes no difference to the behaviour
// since i64 is only illegal when the register file is 32-bit.
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
Op->getOperand(1), Op->getOperand(2));
case Intrinsic::mips_div_s_b:
case Intrinsic::mips_div_s_h:
case Intrinsic::mips_div_s_w:
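The effect of deferring to the generic EXTRACT_VECTOR_ELT node can be modelled outside the DAG (a conceptual sketch with assumed names, not the legalizer itself): when i64 is illegal, the 64-bit extract is split into two 32-bit extracts, which is why the tests below expect two copy_s.w and two sw instructions per i64 result.

#include <cstdint>

// Conceptual model only (assumed names; not LLVM code): on a 32-bit GPR
// target, extracting an i64 element is expanded into two i32 extracts.
struct I64Parts {
  uint32_t lo;  // low word  -> first copy_s.w / sw pair
  uint32_t hi;  // high word -> second copy_s.w / sw pair
};

I64Parts extractI64Element(const uint64_t vec[2], unsigned idx) {
  uint64_t elt = vec[idx];
  return { static_cast<uint32_t>(elt), static_cast<uint32_t>(elt >> 32) };
}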

@@ -60,6 +60,27 @@ declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
; CHECK: sw
; CHECK: .size llvm_mips_copy_s_w_test
;
@llvm_mips_copy_s_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_copy_s_d_RES = global i64 0, align 16
define void @llvm_mips_copy_s_d_test() nounwind {
entry:
%0 = load <2 x i64>* @llvm_mips_copy_s_d_ARG1
%1 = tail call i64 @llvm.mips.copy.s.d(<2 x i64> %0, i32 1)
store i64 %1, i64* @llvm_mips_copy_s_d_RES
ret void
}
declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind
; CHECK: llvm_mips_copy_s_d_test:
; CHECK: ld.w
; CHECK: copy_s.w
; CHECK: copy_s.w
; CHECK: sw
; CHECK: sw
; CHECK: .size llvm_mips_copy_s_d_test
;
@llvm_mips_copy_u_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_copy_u_b_RES = global i32 0, align 16
@@ -117,3 +138,24 @@ declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind
; CHECK: sw
; CHECK: .size llvm_mips_copy_u_w_test
;
@llvm_mips_copy_u_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_copy_u_d_RES = global i64 0, align 16
define void @llvm_mips_copy_u_d_test() nounwind {
entry:
%0 = load <2 x i64>* @llvm_mips_copy_u_d_ARG1
%1 = tail call i64 @llvm.mips.copy.u.d(<2 x i64> %0, i32 1)
store i64 %1, i64* @llvm_mips_copy_u_d_RES
ret void
}
declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32) nounwind
; CHECK: llvm_mips_copy_u_d_test:
; CHECK: ld.w
; CHECK: copy_s.w
; CHECK: copy_s.w
; CHECK: sw
; CHECK: sw
; CHECK: .size llvm_mips_copy_u_d_test
;