[ARM,MVE] Make `vqrshrun` generate the right instruction.

Summary:
A copy-paste error in `arm_mve.td` meant that the MVE `vqrshrun`
intrinsic family generated the `vqshrun` machine instruction instead,
because the rounding flag argument in the IR intrinsic call was set
to 0 rather than 1.
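
For illustration, a minimal sketch of the user-visible effect (assuming an
MVE-enabled target and <arm_mve.h>; the wrapper function name is invented
for this example):

  #include <arm_mve.h>

  // vqrshrunbq_n_s16 is the saturating *rounding* shift-right-narrow with an
  // unsigned result; with this fix it should assemble to vqrshrunb.s16 rather
  // than the non-rounding vqshrunb.s16. (Hypothetical wrapper, for illustration.)
  uint8x16_t narrow_round_example(uint8x16_t acc, int16x8_t v)
  {
      return vqrshrunbq_n_s16(acc, v, 7);
  }

The rounding form adds 1 << (shift - 1) to each element before shifting, so
the mis-selected non-rounding instruction could produce results one less than
expected.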

Reviewers: dmgreen, MarkMurrayARM, miyuki, ostannard

Reviewed By: dmgreen

Subscribers: kristof.beyls, cfe-commits

Tags: #clang

Differential Revision: https://reviews.llvm.org/D72496
Simon Tatham 2020-01-10 11:23:24 +00:00
parent 870f691793
commit 1ccee0e863
2 changed files with 9 additions and 9 deletions

clang/include/clang/Basic/arm_mve.td

@@ -681,7 +681,7 @@ let params = [s16, s32, u16, u32], pnt = PNT_NType in {
}
let params = [s16, s32], pnt = PNT_NType in {
defm vqshrun : VSHRN<UHalfVector, imm_1toHalfN, (? 1,0,1,0)>;
-defm vqrshrun : VSHRN<UHalfVector, imm_1toHalfN, (? 1,0,1,0)>;
+defm vqrshrun : VSHRN<UHalfVector, imm_1toHalfN, (? 1,1,1,0)>;
}
let params = T.Int, pnt = PNT_NType in {
defm vsli : DyadicImmShift<Vector, imm_0toNm1>;

clang/test/CodeGen/arm-mve-intrinsics/… (vqrshrun CodeGen test)

@@ -1086,7 +1086,7 @@ uint16x8_t test_vqrshrntq_m_n_u32(uint16x8_t a, uint32x4_t b, mve_pred16_t p)
// CHECK-LABEL: @test_vqrshrunbq_n_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 0, i32 1, i32 0, i32 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 1, i32 1, i32 0, i32 0)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vqrshrunbq_n_s16(uint8x16_t a, int16x8_t b)
@@ -1100,7 +1100,7 @@ uint8x16_t test_vqrshrunbq_n_s16(uint8x16_t a, int16x8_t b)
// CHECK-LABEL: @test_vqrshrunbq_n_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0, i32 1, i32 0, i32 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1, i32 1, i32 0, i32 0)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vqrshrunbq_n_s32(uint16x8_t a, int32x4_t b)
@@ -1114,7 +1114,7 @@ uint16x8_t test_vqrshrunbq_n_s32(uint16x8_t a, int32x4_t b)
// CHECK-LABEL: @test_vqrshruntq_n_s16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0, i32 1, i32 0, i32 1)
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1, i32 1, i32 0, i32 1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vqrshruntq_n_s16(uint8x16_t a, int16x8_t b)
@@ -1128,7 +1128,7 @@ uint8x16_t test_vqrshruntq_n_s16(uint8x16_t a, int16x8_t b)
// CHECK-LABEL: @test_vqrshruntq_n_s32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 3, i32 1, i32 0, i32 1, i32 0, i32 1)
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 3, i32 1, i32 1, i32 1, i32 0, i32 1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vqrshruntq_n_s32(uint16x8_t a, int32x4_t b)
@@ -1144,7 +1144,7 @@ uint16x8_t test_vqrshruntq_n_s32(uint16x8_t a, int32x4_t b)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 4, i32 1, i32 0, i32 1, i32 0, i32 0, <8 x i1> [[TMP1]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 4, i32 1, i32 1, i32 1, i32 0, i32 0, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vqrshrunbq_m_n_s16(uint8x16_t a, int16x8_t b, mve_pred16_t p)
@@ -1160,7 +1160,7 @@ uint8x16_t test_vqrshrunbq_m_n_s16(uint8x16_t a, int16x8_t b, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 10, i32 1, i32 0, i32 1, i32 0, i32 0, <4 x i1> [[TMP1]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 10, i32 1, i32 1, i32 1, i32 0, i32 0, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vqrshrunbq_m_n_s32(uint16x8_t a, int32x4_t b, mve_pred16_t p)
@@ -1176,7 +1176,7 @@ uint16x8_t test_vqrshrunbq_m_n_s32(uint16x8_t a, int32x4_t b, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 3, i32 1, i32 0, i32 1, i32 0, i32 1, <8 x i1> [[TMP1]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 3, i32 1, i32 1, i32 1, i32 0, i32 1, <8 x i1> [[TMP1]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vqrshruntq_m_n_s16(uint8x16_t a, int16x8_t b, mve_pred16_t p)
@@ -1192,7 +1192,7 @@ uint8x16_t test_vqrshruntq_m_n_s16(uint8x16_t a, int16x8_t b, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 13, i32 1, i32 0, i32 1, i32 0, i32 1, <4 x i1> [[TMP1]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 13, i32 1, i32 1, i32 1, i32 0, i32 1, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vqrshruntq_m_n_s32(uint16x8_t a, int32x4_t b, mve_pred16_t p)