[AArch64] Add missing pair intrinsics, such as:

int32_t vminv_s32(int32x2_t a) 
which should be compiled into SMINP Vd.2S,Vn.2S,Vm.2S

llvm-svn: 196750
This commit is contained in:
Hao Liu 2013-12-09 03:52:22 +00:00
parent 96a587a9f7
commit 844a7da243
2 changed files with 56 additions and 0 deletions

View File

@ -2026,6 +2026,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
// The following are intrinsics with scalar results generated AcrossVec vectors
case AArch64::BI__builtin_neon_vaddlv_s8:
case AArch64::BI__builtin_neon_vaddlv_s16:
case AArch64::BI__builtin_neon_vaddlv_s32:
case AArch64::BI__builtin_neon_vaddlvq_s8:
case AArch64::BI__builtin_neon_vaddlvq_s16:
case AArch64::BI__builtin_neon_vaddlvq_s32:
@ -2033,6 +2034,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
AcrossVec = true; ExtendEle = true; s = "saddlv"; break;
case AArch64::BI__builtin_neon_vaddlv_u8:
case AArch64::BI__builtin_neon_vaddlv_u16:
case AArch64::BI__builtin_neon_vaddlv_u32:
case AArch64::BI__builtin_neon_vaddlvq_u8:
case AArch64::BI__builtin_neon_vaddlvq_u16:
case AArch64::BI__builtin_neon_vaddlvq_u32:
@ -2040,6 +2042,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
AcrossVec = true; ExtendEle = true; s = "uaddlv"; break;
case AArch64::BI__builtin_neon_vmaxv_s8:
case AArch64::BI__builtin_neon_vmaxv_s16:
case AArch64::BI__builtin_neon_vmaxv_s32:
case AArch64::BI__builtin_neon_vmaxvq_s8:
case AArch64::BI__builtin_neon_vmaxvq_s16:
case AArch64::BI__builtin_neon_vmaxvq_s32:
@ -2047,6 +2050,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
AcrossVec = true; ExtendEle = false; s = "smaxv"; break;
case AArch64::BI__builtin_neon_vmaxv_u8:
case AArch64::BI__builtin_neon_vmaxv_u16:
case AArch64::BI__builtin_neon_vmaxv_u32:
case AArch64::BI__builtin_neon_vmaxvq_u8:
case AArch64::BI__builtin_neon_vmaxvq_u16:
case AArch64::BI__builtin_neon_vmaxvq_u32:
@ -2054,6 +2058,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
AcrossVec = true; ExtendEle = false; s = "umaxv"; break;
case AArch64::BI__builtin_neon_vminv_s8:
case AArch64::BI__builtin_neon_vminv_s16:
case AArch64::BI__builtin_neon_vminv_s32:
case AArch64::BI__builtin_neon_vminvq_s8:
case AArch64::BI__builtin_neon_vminvq_s16:
case AArch64::BI__builtin_neon_vminvq_s32:
@ -2061,6 +2066,7 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
AcrossVec = true; ExtendEle = false; s = "sminv"; break;
case AArch64::BI__builtin_neon_vminv_u8:
case AArch64::BI__builtin_neon_vminv_u16:
case AArch64::BI__builtin_neon_vminv_u32:
case AArch64::BI__builtin_neon_vminvq_u8:
case AArch64::BI__builtin_neon_vminvq_u16:
case AArch64::BI__builtin_neon_vminvq_u32:
@ -2068,12 +2074,14 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
AcrossVec = true; ExtendEle = false; s = "uminv"; break;
case AArch64::BI__builtin_neon_vaddv_s8:
case AArch64::BI__builtin_neon_vaddv_s16:
case AArch64::BI__builtin_neon_vaddv_s32:
case AArch64::BI__builtin_neon_vaddvq_s8:
case AArch64::BI__builtin_neon_vaddvq_s16:
case AArch64::BI__builtin_neon_vaddvq_s32:
case AArch64::BI__builtin_neon_vaddvq_s64:
case AArch64::BI__builtin_neon_vaddv_u8:
case AArch64::BI__builtin_neon_vaddv_u16:
case AArch64::BI__builtin_neon_vaddv_u32:
case AArch64::BI__builtin_neon_vaddvq_u8:
case AArch64::BI__builtin_neon_vaddvq_u16:
case AArch64::BI__builtin_neon_vaddvq_u32:

View File

@ -11723,3 +11723,51 @@ float64x1_t test_vrsqrts_f64(float64x1_t a, float64x1_t b) {
return vrsqrts_f64(a, b);
// CHECK: frsqrts d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
}
// Signed min reduction over a 2-lane vector: with only two elements, the
// across-vector reduction lowers to a single pairwise SMINP (per the commit
// message, vminv_s32 should compile to SMINP Vd.2S,Vn.2S,Vm.2S).
int32_t test_vminv_s32(int32x2_t a) {
// CHECK-LABEL: test_vminv_s32
return vminv_s32(a);
// CHECK: sminp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
}
// Unsigned variant of the 2-lane min reduction: expects the pairwise
// UMINP instruction rather than an across-vector UMINV.
uint32_t test_vminv_u32(uint32x2_t a) {
// CHECK-LABEL: test_vminv_u32
return vminv_u32(a);
// CHECK: uminp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
}
// Signed max reduction over two lanes: lowers to a single pairwise SMAXP.
int32_t test_vmaxv_s32(int32x2_t a) {
// CHECK-LABEL: test_vmaxv_s32
return vmaxv_s32(a);
// CHECK: smaxp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
}
// Unsigned max reduction over two lanes: lowers to a single pairwise UMAXP.
uint32_t test_vmaxv_u32(uint32x2_t a) {
// CHECK-LABEL: test_vmaxv_u32
return vmaxv_u32(a);
// CHECK: umaxp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
}
// Signed add reduction over two lanes: a single pairwise ADDP suffices
// (ADDP has no signed/unsigned distinction, cf. test_vaddv_u32 below).
int32_t test_vaddv_s32(int32x2_t a) {
// CHECK-LABEL: test_vaddv_s32
return vaddv_s32(a);
// CHECK: addp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
}
// Unsigned add reduction over two lanes: same ADDP lowering as the signed
// form, since two's-complement addition is sign-agnostic.
uint32_t test_vaddv_u32(uint32x2_t a) {
// CHECK-LABEL: test_vaddv_u32
return vaddv_u32(a);
// CHECK: addp {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
}
// Widening (long) signed add reduction: two 32-bit lanes sum into one
// 64-bit result, so SADDLP .1d,.2s does the whole reduction in one step.
int64_t test_vaddlv_s32(int32x2_t a) {
// CHECK-LABEL: test_vaddlv_s32
return vaddlv_s32(a);
// CHECK: saddlp {{v[0-9]+}}.1d, {{v[0-9]+}}.2s
}
// Widening (long) unsigned add reduction: the zero-extending counterpart,
// lowered to a single UADDLP .1d,.2s.
uint64_t test_vaddlv_u32(uint32x2_t a) {
// CHECK-LABEL: test_vaddlv_u32
return vaddlv_u32(a);
// CHECK: uaddlp {{v[0-9]+}}.1d, {{v[0-9]+}}.2s
}