[AArch64] Tests for legacy AArch32 NEON scalar shift by immediate instructions.

A number of non-overloaded intrinsics have been replaced by their overloaded
counterparts.

llvm-svn: 194599
This commit is contained in:
Chad Rosier 2013-11-13 20:05:44 +00:00
parent d3ae5f895e
commit e714a962b5
2 changed files with 86 additions and 9 deletions

View File

@ -2276,12 +2276,12 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
s = "vushr"; OverloadInt = false; break;
// Scalar Signed Rounding Shift Right (Immediate)
case AArch64::BI__builtin_neon_vrshrd_n_s64:
Int = Intrinsic::aarch64_neon_vrshrds_n;
s = "vsrshr"; OverloadInt = false; break;
Int = Intrinsic::aarch64_neon_vsrshr;
s = "vsrshr"; OverloadInt = true; break;
// Scalar Unsigned Rounding Shift Right (Immediate)
case AArch64::BI__builtin_neon_vrshrd_n_u64:
Int = Intrinsic::aarch64_neon_vrshrdu_n;
s = "vurshr"; OverloadInt = false; break;
Int = Intrinsic::aarch64_neon_vurshr;
s = "vurshr"; OverloadInt = true; break;
// Scalar Signed Shift Right and Accumulate (Immediate)
case AArch64::BI__builtin_neon_vsrad_n_s64:
Int = Intrinsic::aarch64_neon_vsrads_n;
@ -2322,18 +2322,18 @@ static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
case AArch64::BI__builtin_neon_vqshluh_n_s16:
case AArch64::BI__builtin_neon_vqshlus_n_s32:
case AArch64::BI__builtin_neon_vqshlud_n_s64:
Int = Intrinsic::aarch64_neon_vqshlus_n;
Int = Intrinsic::aarch64_neon_vsqshlu;
s = "vsqshlu"; OverloadInt = true; break;
// Shift Right And Insert (Immediate)
case AArch64::BI__builtin_neon_vsrid_n_s64:
case AArch64::BI__builtin_neon_vsrid_n_u64:
Int = Intrinsic::aarch64_neon_vsrid_n;
s = "vsri"; OverloadInt = false; break;
Int = Intrinsic::aarch64_neon_vsri;
s = "vsri"; OverloadInt = true; break;
// Shift Left And Insert (Immediate)
case AArch64::BI__builtin_neon_vslid_n_s64:
case AArch64::BI__builtin_neon_vslid_n_u64:
Int = Intrinsic::aarch64_neon_vslid_n;
s = "vsli"; OverloadInt = false; break;
Int = Intrinsic::aarch64_neon_vsli;
s = "vsli"; OverloadInt = true; break;
// Signed Saturating Shift Right Narrow (Immediate)
case AArch64::BI__builtin_neon_vqshrnh_n_s16:
case AArch64::BI__builtin_neon_vqshrns_n_s32:

View File

@ -7490,24 +7490,48 @@ int64_t test_vshrd_n_s64(int64_t a) {
return (int64_t)vshrd_n_s64(a, 1);
}
int64x1_t test_vshr_n_s64(int64x1_t a) {
// CHECK-LABEL: test_vshr_n_s64
// CHECK: sshr {{d[0-9]+}}, {{d[0-9]+}}, #1
return vshr_n_s64(a, 1);
}
uint64_t test_vshrd_n_u64(uint64_t a) {
// CHECK-LABEL: test_vshrd_n_u64
// CHECK: ushr {{d[0-9]+}}, {{d[0-9]+}}, #64
return (uint64_t)vshrd_n_u64(a, 64);
}
uint64x1_t test_vshr_n_u64(uint64x1_t a) {
// CHECK-LABEL: test_vshr_n_u64
// CHECK: ushr {{d[0-9]+}}, {{d[0-9]+}}, #1
return vshr_n_u64(a, 1);
}
int64_t test_vrshrd_n_s64(int64_t a) {
// CHECK-LABEL: test_vrshrd_n_s64
// CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #63
return (int64_t)vrshrd_n_s64(a, 63);
}
int64x1_t test_vrshr_n_s64(int64x1_t a) {
// CHECK: test_vrshr_n_s64
// CHECK: srshr d{{[0-9]+}}, d{{[0-9]+}}, #1
return vrshr_n_s64(a, 1);
}
uint64_t test_vrshrd_n_u64(uint64_t a) {
// CHECK-LABEL: test_vrshrd_n_u64
// CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #63
return (uint64_t)vrshrd_n_u64(a, 63);
}
uint64x1_t test_vrshr_n_u64(uint64x1_t a) {
// CHECK: test_vrshr_n_u64
// CHECK: urshr d{{[0-9]+}}, d{{[0-9]+}}, #1
return vrshr_n_u64(a, 1);
}
int64_t test_vsrad_n_s64(int64_t a, int64_t b) {
// CHECK-LABEL: test_vsrad_n_s64
// CHECK: ssra {{d[0-9]+}}, {{d[0-9]+}}, #63
@ -7537,6 +7561,11 @@ int64_t test_vshld_n_s64(int64_t a) {
// CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #0
return (int64_t)vshld_n_s64(a, 0);
}
int64x1_t test_vshl_n_s64(int64x1_t a) {
// CHECK: test_vshl_n_s64
// CHECK: shl d{{[0-9]+}}, d{{[0-9]+}}, #1
return vshl_n_s64(a, 1);
}
uint64_t test_vshld_n_u64(uint64_t a) {
// CHECK-LABEL: test_vshld_n_u64
@ -7544,6 +7573,12 @@ uint64_t test_vshld_n_u64(uint64_t a) {
return (uint64_t)vshld_n_u64(a, 63);
}
uint64x1_t test_vshl_n_u64(uint64x1_t a) {
// CHECK: test_vshl_n_u64
// CHECK: shl d{{[0-9]+}}, d{{[0-9]+}}, #1
return vshl_n_u64(a, 1);
}
int8_t test_vqshlb_n_s8(int8_t a) {
// CHECK-LABEL: test_vqshlb_n_s8
// CHECK: sqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
@ -7568,6 +7603,12 @@ int64_t test_vqshld_n_s64(int64_t a) {
return (int64_t)vqshld_n_s64(a, 63);
}
int64x1_t test_vqshl_n_s64(int64x1_t a) {
// CHECK: test_vqshl_n_s64
// CHECK: sqshl d{{[0-9]+}}, d{{[0-9]+}}, #1
return vqshl_n_s64(a, 1);
}
uint8_t test_vqshlb_n_u8(uint8_t a) {
// CHECK-LABEL: test_vqshlb_n_u8
// CHECK: uqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
@ -7592,6 +7633,12 @@ uint64_t test_vqshld_n_u64(uint64_t a) {
return (uint64_t)vqshld_n_u64(a, 63);
}
uint64x1_t test_vqshl_n_u64(uint64x1_t a) {
// CHECK: test_vqshl_n_u64
// CHECK: uqshl d{{[0-9]+}}, d{{[0-9]+}}, #1
return vqshl_n_u64(a, 1);
}
int8_t test_vqshlub_n_s8(int8_t a) {
// CHECK-LABEL: test_vqshlub_n_s8
// CHECK: sqshlu {{b[0-9]+}}, {{b[0-9]+}}, #7
@ -7616,30 +7663,60 @@ int64_t test_vqshlud_n_s64(int64_t a) {
return (int64_t)vqshlud_n_s64(a, 63);
}
uint64x1_t test_vqshlu_n_s64(int64x1_t a) {
// CHECK: test_vqshlu_n_s64
// CHECK: sqshlu d{{[0-9]+}}, d{{[0-9]+}}, #1
return vqshlu_n_s64(a, 1);
}
int64_t test_vsrid_n_s64(int64_t a, int64_t b) {
// CHECK-LABEL: test_vsrid_n_s64
// CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
return (int64_t)vsrid_n_s64(a, b, 63);
}
int64x1_t test_vsri_n_s64(int64x1_t a, int64x1_t b) {
// CHECK: test_vsri_n_s64
// CHECK: sri d{{[0-9]+}}, d{{[0-9]+}}, #1
return vsri_n_s64(a, b, 1);
}
uint64_t test_vsrid_n_u64(uint64_t a, uint64_t b) {
// CHECK-LABEL: test_vsrid_n_u64
// CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
return (uint64_t)vsrid_n_u64(a, b, 63);
}
uint64x1_t test_vsri_n_u64(uint64x1_t a, uint64x1_t b) {
// CHECK: test_vsri_n_u64
// CHECK: sri d{{[0-9]+}}, d{{[0-9]+}}, #1
return vsri_n_u64(a, b, 1);
}
int64_t test_vslid_n_s64(int64_t a, int64_t b) {
// CHECK-LABEL: test_vslid_n_s64
// CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
return (int64_t)vslid_n_s64(a, b, 63);
}
int64x1_t test_vsli_n_s64(int64x1_t a, int64x1_t b) {
// CHECK: test_vsli_n_s64
// CHECK: sli d{{[0-9]+}}, d{{[0-9]+}}, #1
return vsli_n_s64(a, b, 1);
}
uint64_t test_vslid_n_u64(uint64_t a, uint64_t b) {
// CHECK-LABEL: test_vslid_n_u64
// CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
return (uint64_t)vslid_n_u64(a, b, 63);
}
uint64x1_t test_vsli_n_u64(uint64x1_t a, uint64x1_t b) {
// CHECK: test_vsli_n_u64
// CHECK: sli d{{[0-9]+}}, d{{[0-9]+}}, #1
return vsli_n_u64(a, b, 1);
}
int8_t test_vqshrnh_n_s16(int16_t a) {
// CHECK-LABEL: test_vqshrnh_n_s16
// CHECK: sqshrn {{b[0-9]+}}, {{h[0-9]+}}, #15