; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK

; i16 -> i32
define arm_aapcs_vfpcc <4 x i32> @sext_i32_0246(<8 x i16> %src) {
; CHECK-LABEL: sext_i32_0246:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <8 x i16> %src, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %out = sext <4 x i16> %strided.vec to <4 x i32>
  ret <4 x i32> %out
}

define arm_aapcs_vfpcc <4 x i32> @sext_i32_1357(<8 x i16> %src) {
; CHECK-LABEL: sext_i32_1357:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlt.s16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <8 x i16> %src, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %out = sext <4 x i16> %strided.vec to <4 x i32>
  ret <4 x i32> %out
}

define arm_aapcs_vfpcc <8 x i32> @sext_i32_02468101214(<16 x i16> %src) {
; CHECK-LABEL: sext_i32_02468101214:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vmovlb.s16 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x i16> %src, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %out = sext <8 x i16> %strided.vec to <8 x i32>
  ret <8 x i32> %out
}

define arm_aapcs_vfpcc <8 x i32> @sext_i32_13579111315(<16 x i16> %src) {
; CHECK-LABEL: sext_i32_13579111315:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlt.s16 q0, q0
; CHECK-NEXT:    vmovlt.s16 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x i16> %src, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %out = sext <8 x i16> %strided.vec to <8 x i32>
  ret <8 x i32> %out
}

define arm_aapcs_vfpcc <4 x i32> @zext_i32_0246(<8 x i16> %src) {
; CHECK-LABEL: zext_i32_0246:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.u16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <8 x i16> %src, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %out = zext <4 x i16> %strided.vec to <4 x i32>
  ret <4 x i32> %out
}

define arm_aapcs_vfpcc <4 x i32> @zext_i32_1357(<8 x i16> %src) {
; CHECK-LABEL: zext_i32_1357:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlt.u16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <8 x i16> %src, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %out = zext <4 x i16> %strided.vec to <4 x i32>
  ret <4 x i32> %out
}

define arm_aapcs_vfpcc <8 x i32> @zext_i32_02468101214(<16 x i16> %src) {
; CHECK-LABEL: zext_i32_02468101214:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.u16 q0, q0
; CHECK-NEXT:    vmovlb.u16 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x i16> %src, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %out = zext <8 x i16> %strided.vec to <8 x i32>
  ret <8 x i32> %out
}

define arm_aapcs_vfpcc <8 x i32> @zext_i32_13579111315(<16 x i16> %src) {
; CHECK-LABEL: zext_i32_13579111315:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlt.u16 q0, q0
; CHECK-NEXT:    vmovlt.u16 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x i16> %src, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %out = zext <8 x i16> %strided.vec to <8 x i32>
  ret <8 x i32> %out
}

; i8 -> i16
define arm_aapcs_vfpcc <8 x i16> @sext_i16_02468101214(<16 x i8> %src) {
; CHECK-LABEL: sext_i16_02468101214:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x i8> %src, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %out = sext <8 x i8> %strided.vec to <8 x i16>
  ret <8 x i16> %out
}

define arm_aapcs_vfpcc <8 x i16> @sext_i16_13579111315(<16 x i8> %src) {
; CHECK-LABEL: sext_i16_13579111315:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlt.s8 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x i8> %src, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %out = sext <8 x i8> %strided.vec to <8 x i16>
  ret <8 x i16> %out
}

define arm_aapcs_vfpcc <16 x i16> @sext_i16_024681012141618202224262830(<32 x i8> %src) {
; CHECK-LABEL: sext_i16_024681012141618202224262830:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    vmovlb.s8 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <32 x i8> %src, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %out = sext <16 x i8> %strided.vec to <16 x i16>
  ret <16 x i16> %out
}

define arm_aapcs_vfpcc <16 x i16> @sext_i16_135791113151719212325272931(<32 x i8> %src) {
; CHECK-LABEL: sext_i16_135791113151719212325272931:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlt.s8 q0, q0
; CHECK-NEXT:    vmovlt.s8 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <32 x i8> %src, <32 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %out = sext <16 x i8> %strided.vec to <16 x i16>
  ret <16 x i16> %out
}

define arm_aapcs_vfpcc <8 x i16> @zext_i16_02468101214(<16 x i8> %src) {
; CHECK-LABEL: zext_i16_02468101214:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.u8 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x i8> %src, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %out = zext <8 x i8> %strided.vec to <8 x i16>
  ret <8 x i16> %out
}

define arm_aapcs_vfpcc <8 x i16> @zext_i16_13579111315(<16 x i8> %src) {
; CHECK-LABEL: zext_i16_13579111315:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlt.u8 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x i8> %src, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %out = zext <8 x i8> %strided.vec to <8 x i16>
  ret <8 x i16> %out
}

define arm_aapcs_vfpcc <16 x i16> @zext_i16_024681012141618202224262830(<32 x i8> %src) {
; CHECK-LABEL: zext_i16_024681012141618202224262830:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.u8 q0, q0
; CHECK-NEXT:    vmovlb.u8 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <32 x i8> %src, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %out = zext <16 x i8> %strided.vec to <16 x i16>
  ret <16 x i16> %out
}

define arm_aapcs_vfpcc <16 x i16> @zext_i16_135791113151719212325272931(<32 x i8> %src) {
; CHECK-LABEL: zext_i16_135791113151719212325272931:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlt.u8 q0, q0
; CHECK-NEXT:    vmovlt.u8 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <32 x i8> %src, <32 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %out = zext <16 x i8> %strided.vec to <16 x i16>
  ret <16 x i16> %out
}

; f16 -> f32
define arm_aapcs_vfpcc <4 x float> @fpext_0246(<8 x half> %src) {
; CHECK-LABEL: fpext_0246:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcvtb.f32.f16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <8 x half> %src, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %out = fpext <4 x half> %strided.vec to <4 x float>
  ret <4 x float> %out
}

define arm_aapcs_vfpcc <4 x float> @fpext_1357(<8 x half> %src) {
; CHECK-LABEL: fpext_1357:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcvtt.f32.f16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <8 x half> %src, <8 x half> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %out = fpext <4 x half> %strided.vec to <4 x float>
  ret <4 x float> %out
}

define arm_aapcs_vfpcc <8 x float> @fpext_02468101214(<16 x half> %src) {
; CHECK-LABEL: fpext_02468101214:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcvtb.f32.f16 q0, q0
; CHECK-NEXT:    vcvtb.f32.f16 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x half> %src, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %out = fpext <8 x half> %strided.vec to <8 x float>
  ret <8 x float> %out
}

define arm_aapcs_vfpcc <8 x float> @fpext_13579111315(<16 x half> %src) {
; CHECK-LABEL: fpext_13579111315:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcvtt.f32.f16 q0, q0
; CHECK-NEXT:    vcvtt.f32.f16 q1, q1
; CHECK-NEXT:    bx lr
entry:
  %strided.vec = shufflevector <16 x half> %src, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %out = fpext <8 x half> %strided.vec to <8 x float>
  ret <8 x float> %out
}