; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-MVE
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-MVEFP
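; Shuffles of 128-bit MVE vectors with an undef second operand. The <4 x i32>
; tests below cover a full lane reversal (no single vrev reverses a whole
; 128-bit register, so it is built from per-lane vmov.f32 moves), an identity
; mask that folds away completely, and an arbitrary permutation.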
define arm_aapcs_vfpcc <4 x i32> @shuffle1_i32(<4 x i32> %src) {
; CHECK-LABEL: shuffle1_i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s4, s3
; CHECK-NEXT: vmov.f32 s5, s2
; CHECK-NEXT: vmov.f32 s6, s1
; CHECK-NEXT: vmov.f32 s7, s0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x i32> %src, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i32> %out
}

define arm_aapcs_vfpcc <4 x i32> @shuffle2_i32(<4 x i32> %src) {
; CHECK-LABEL: shuffle2_i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x i32> %src, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %out
}

define arm_aapcs_vfpcc <4 x i32> @shuffle3_i32(<4 x i32> %src) {
; CHECK-LABEL: shuffle3_i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s4, s3
; CHECK-NEXT: vmov.f32 s5, s1
; CHECK-NEXT: vmov.f32 s6, s2
; CHECK-NEXT: vmov.f32 s7, s0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x i32> %src, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
  ret <4 x i32> %out
}
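; A mask that only swaps adjacent 32-bit lanes (<1,0,3,2>) matches vrev64.32
; exactly; a mask whose only defined lane stays in place needs no code at all.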
define arm_aapcs_vfpcc <4 x i32> @shuffle5_i32(<4 x i32> %src) {
; CHECK-LABEL: shuffle5_i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev64.32 q1, q0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x i32> %src, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x i32> %out
}

define arm_aapcs_vfpcc <4 x i32> @shuffle6_i32(<4 x i32> %src) {
; CHECK-LABEL: shuffle6_i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x i32> %src, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 3>
  ret <4 x i32> %out
}
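; The same mask shapes for <8 x i16>. A full reversal is expanded into
; vmov.u16/vmov.16 pairs through a GPR, while the within-64-bit and
; within-32-bit reversals map onto vrev64.16 and vrev32.16.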
define arm_aapcs_vfpcc <8 x i16> @shuffle1_i16(<8 x i16> %src) {
; CHECK-LABEL: shuffle1_i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov q1, q0
; CHECK-NEXT: vmov.u16 r0, q0[7]
; CHECK-NEXT: vmov.16 q0[0], r0
; CHECK-NEXT: vmov.u16 r0, q1[6]
; CHECK-NEXT: vmov.16 q0[1], r0
; CHECK-NEXT: vmov.u16 r0, q1[5]
; CHECK-NEXT: vmov.16 q0[2], r0
; CHECK-NEXT: vmov.u16 r0, q1[4]
; CHECK-NEXT: vmov.16 q0[3], r0
; CHECK-NEXT: vmov.u16 r0, q1[3]
; CHECK-NEXT: vmov.16 q0[4], r0
; CHECK-NEXT: vmov.u16 r0, q1[2]
; CHECK-NEXT: vmov.16 q0[5], r0
; CHECK-NEXT: vmov.u16 r0, q1[1]
; CHECK-NEXT: vmov.16 q0[6], r0
; CHECK-NEXT: vmov.u16 r0, q1[0]
; CHECK-NEXT: vmov.16 q0[7], r0
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  ret <8 x i16> %out
}

define arm_aapcs_vfpcc <8 x i16> @shuffle2_i16(<8 x i16> %src) {
; CHECK-LABEL: shuffle2_i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %out
}

define arm_aapcs_vfpcc <8 x i16> @shuffle3_i16(<8 x i16> %src) {
; CHECK-LABEL: shuffle3_i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov q1, q0
; CHECK-NEXT: vmov.u16 r0, q0[7]
; CHECK-NEXT: vmov.16 q0[2], r0
; CHECK-NEXT: vmov.u16 r0, q1[6]
; CHECK-NEXT: vmov.16 q0[3], r0
; CHECK-NEXT: vmov.u16 r0, q1[3]
; CHECK-NEXT: vmov.16 q0[4], r0
; CHECK-NEXT: vmov.u16 r0, q1[1]
; CHECK-NEXT: vmov.16 q0[5], r0
; CHECK-NEXT: vmov.u16 r0, q1[2]
; CHECK-NEXT: vmov.16 q0[6], r0
; CHECK-NEXT: vmov.u16 r0, q1[0]
; CHECK-NEXT: vmov.16 q0[7], r0
; CHECK-NEXT: vmov.f32 s0, s6
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 7, i32 6, i32 3, i32 1, i32 2, i32 0>
  ret <8 x i16> %out
}

define arm_aapcs_vfpcc <8 x i16> @shuffle5_i16(<8 x i16> %src) {
; CHECK-LABEL: shuffle5_i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev64.16 q1, q0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
  ret <8 x i16> %out
}

define arm_aapcs_vfpcc <8 x i16> @shuffle6_i16(<8 x i16> %src) {
; CHECK-LABEL: shuffle6_i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev32.16 q0, q0
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x i16> %out
}
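; <16 x i8> variants: the full reversal and the arbitrary permutation expand
; into 32 lane moves, while the chunk-wise reversals select vrev64.8,
; vrev32.8 and vrev16.8 respectively.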
define arm_aapcs_vfpcc <16 x i8> @shuffle1_i8(<16 x i8> %src) {
; CHECK-LABEL: shuffle1_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov q1, q0
; CHECK-NEXT: vmov.u8 r0, q0[15]
; CHECK-NEXT: vmov.8 q0[0], r0
; CHECK-NEXT: vmov.u8 r0, q1[14]
; CHECK-NEXT: vmov.8 q0[1], r0
; CHECK-NEXT: vmov.u8 r0, q1[13]
; CHECK-NEXT: vmov.8 q0[2], r0
; CHECK-NEXT: vmov.u8 r0, q1[12]
; CHECK-NEXT: vmov.8 q0[3], r0
; CHECK-NEXT: vmov.u8 r0, q1[11]
; CHECK-NEXT: vmov.8 q0[4], r0
; CHECK-NEXT: vmov.u8 r0, q1[10]
; CHECK-NEXT: vmov.8 q0[5], r0
; CHECK-NEXT: vmov.u8 r0, q1[9]
; CHECK-NEXT: vmov.8 q0[6], r0
; CHECK-NEXT: vmov.u8 r0, q1[8]
; CHECK-NEXT: vmov.8 q0[7], r0
; CHECK-NEXT: vmov.u8 r0, q1[7]
; CHECK-NEXT: vmov.8 q0[8], r0
; CHECK-NEXT: vmov.u8 r0, q1[6]
; CHECK-NEXT: vmov.8 q0[9], r0
; CHECK-NEXT: vmov.u8 r0, q1[5]
; CHECK-NEXT: vmov.8 q0[10], r0
; CHECK-NEXT: vmov.u8 r0, q1[4]
; CHECK-NEXT: vmov.8 q0[11], r0
; CHECK-NEXT: vmov.u8 r0, q1[3]
; CHECK-NEXT: vmov.8 q0[12], r0
; CHECK-NEXT: vmov.u8 r0, q1[2]
; CHECK-NEXT: vmov.8 q0[13], r0
; CHECK-NEXT: vmov.u8 r0, q1[1]
; CHECK-NEXT: vmov.8 q0[14], r0
; CHECK-NEXT: vmov.u8 r0, q1[0]
; CHECK-NEXT: vmov.8 q0[15], r0
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  ret <16 x i8> %out
}

define arm_aapcs_vfpcc <16 x i8> @shuffle2_i8(<16 x i8> %src) {
; CHECK-LABEL: shuffle2_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %out
}

define arm_aapcs_vfpcc <16 x i8> @shuffle3_i8(<16 x i8> %src) {
; CHECK-LABEL: shuffle3_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov q1, q0
; CHECK-NEXT: vmov.u8 r0, q0[4]
; CHECK-NEXT: vmov.8 q0[0], r0
; CHECK-NEXT: vmov.u8 r0, q1[5]
; CHECK-NEXT: vmov.8 q0[1], r0
; CHECK-NEXT: vmov.u8 r0, q1[15]
; CHECK-NEXT: vmov.8 q0[2], r0
; CHECK-NEXT: vmov.u8 r0, q1[7]
; CHECK-NEXT: vmov.8 q0[3], r0
; CHECK-NEXT: vmov.u8 r0, q1[14]
; CHECK-NEXT: vmov.8 q0[4], r0
; CHECK-NEXT: vmov.u8 r0, q1[9]
; CHECK-NEXT: vmov.8 q0[5], r0
; CHECK-NEXT: vmov.u8 r0, q1[6]
; CHECK-NEXT: vmov.8 q0[6], r0
; CHECK-NEXT: vmov.u8 r0, q1[3]
; CHECK-NEXT: vmov.8 q0[7], r0
; CHECK-NEXT: vmov.u8 r0, q1[10]
; CHECK-NEXT: vmov.8 q0[8], r0
; CHECK-NEXT: vmov.u8 r0, q1[12]
; CHECK-NEXT: vmov.8 q0[9], r0
; CHECK-NEXT: vmov.u8 r0, q1[1]
; CHECK-NEXT: vmov.8 q0[10], r0
; CHECK-NEXT: vmov.u8 r0, q1[13]
; CHECK-NEXT: vmov.8 q0[11], r0
; CHECK-NEXT: vmov.u8 r0, q1[2]
; CHECK-NEXT: vmov.8 q0[12], r0
; CHECK-NEXT: vmov.u8 r0, q1[8]
; CHECK-NEXT: vmov.8 q0[13], r0
; CHECK-NEXT: vmov.u8 r0, q1[0]
; CHECK-NEXT: vmov.8 q0[14], r0
; CHECK-NEXT: vmov.u8 r0, q1[11]
; CHECK-NEXT: vmov.8 q0[15], r0
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 15, i32 7, i32 14, i32 9, i32 6, i32 3, i32 10, i32 12, i32 1, i32 13, i32 2, i32 8, i32 0, i32 11>
  ret <16 x i8> %out
}

define arm_aapcs_vfpcc <16 x i8> @shuffle5_i8(<16 x i8> %src) {
; CHECK-LABEL: shuffle5_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev64.8 q1, q0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
  ret <16 x i8> %out
}

define arm_aapcs_vfpcc <16 x i8> @shuffle6_i8(<16 x i8> %src) {
; CHECK-LABEL: shuffle6_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev32.8 q0, q0
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
  ret <16 x i8> %out
}

define arm_aapcs_vfpcc <16 x i8> @shuffle7_i8(<16 x i8> %src) {
; CHECK-LABEL: shuffle7_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev16.8 q0, q0
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
  ret <16 x i8> %out
}
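; <2 x i64> shuffles: there is no 64-bit lane-to-lane move, so swapping the
; two halves is done as four 32-bit vmov.f32 moves of the underlying
; s registers; identity and undef-lane masks fold away.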
define arm_aapcs_vfpcc <2 x i64> @shuffle1_i64(<2 x i64> %src) {
; CHECK-LABEL: shuffle1_i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <2 x i64> %src, <2 x i64> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x i64> %out
}

define arm_aapcs_vfpcc <2 x i64> @shuffle2_i64(<2 x i64> %src) {
; CHECK-LABEL: shuffle2_i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s4, s2
; CHECK-NEXT: vmov.f32 s5, s3
; CHECK-NEXT: vmov.f32 s6, s0
; CHECK-NEXT: vmov.f32 s7, s1
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <2 x i64> %src, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
  ret <2 x i64> %out
}

define arm_aapcs_vfpcc <2 x i64> @shuffle3_i64(<2 x i64> %src) {
; CHECK-LABEL: shuffle3_i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <2 x i64> %src, <2 x i64> undef, <2 x i32> <i32 undef, i32 1>
  ret <2 x i64> %out
}
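; Float versions of the <4 x i32> shuffles above; the lowering is identical,
; since the integer cases already went through vmov.f32 lane moves.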
define arm_aapcs_vfpcc <4 x float> @shuffle1_f32(<4 x float> %src) {
; CHECK-LABEL: shuffle1_f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s4, s3
; CHECK-NEXT: vmov.f32 s5, s2
; CHECK-NEXT: vmov.f32 s6, s1
; CHECK-NEXT: vmov.f32 s7, s0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x float> %src, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x float> %out
}

define arm_aapcs_vfpcc <4 x float> @shuffle2_f32(<4 x float> %src) {
; CHECK-LABEL: shuffle2_f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x float> %src, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x float> %out
}

define arm_aapcs_vfpcc <4 x float> @shuffle3_f32(<4 x float> %src) {
; CHECK-LABEL: shuffle3_f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s4, s3
; CHECK-NEXT: vmov.f32 s5, s1
; CHECK-NEXT: vmov.f32 s6, s2
; CHECK-NEXT: vmov.f32 s7, s0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x float> %src, <4 x float> undef, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
  ret <4 x float> %out
}

define arm_aapcs_vfpcc <4 x float> @shuffle5_f32(<4 x float> %src) {
; CHECK-LABEL: shuffle5_f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev64.32 q1, q0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <4 x float> %src, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x float> %out
}
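; Half-precision shuffles. Single f16 lanes are extracted with vmovx.f16 (for
; the high half of an s register) or vmov, round-tripped through a GPR, and
; re-inserted with vmov.16; the pair-reversal masks still become vrev64.16
; and vrev32.16.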
define arm_aapcs_vfpcc <8 x half> @shuffle1_f16(<8 x half> %src) {
; CHECK-LABEL: shuffle1_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s3
; CHECK-NEXT: vmov r0, s3
; CHECK-NEXT: vmov r1, s4
; CHECK-NEXT: vmovx.f16 s8, s2
; CHECK-NEXT: vmov.16 q1[0], r1
; CHECK-NEXT: vmov.16 q1[1], r0
; CHECK-NEXT: vmov r0, s8
; CHECK-NEXT: vmov.16 q1[2], r0
; CHECK-NEXT: vmov r0, s2
; CHECK-NEXT: vmovx.f16 s8, s1
; CHECK-NEXT: vmov.16 q1[3], r0
; CHECK-NEXT: vmov r0, s8
; CHECK-NEXT: vmovx.f16 s8, s0
; CHECK-NEXT: vmov.16 q1[4], r0
; CHECK-NEXT: vmov r0, s1
; CHECK-NEXT: vmov.16 q1[5], r0
; CHECK-NEXT: vmov r0, s8
; CHECK-NEXT: vmov.16 q1[6], r0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q1[7], r0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  ret <8 x half> %out
}

define arm_aapcs_vfpcc <8 x half> @shuffle2_f16(<8 x half> %src) {
; CHECK-LABEL: shuffle2_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x half> %out
}

define arm_aapcs_vfpcc <8 x half> @shuffle3_f16(<8 x half> %src) {
; CHECK-LABEL: shuffle3_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s4, s3
; CHECK-NEXT: vmov r0, s3
; CHECK-NEXT: vmov r1, s4
; CHECK-NEXT: vmovx.f16 s8, s1
; CHECK-NEXT: vmov.16 q1[2], r1
; CHECK-NEXT: vmov.16 q1[3], r0
; CHECK-NEXT: vmov r0, s8
; CHECK-NEXT: vmovx.f16 s8, s0
; CHECK-NEXT: vmov.16 q1[4], r0
; CHECK-NEXT: vmov r0, s8
; CHECK-NEXT: vmov.16 q1[5], r0
; CHECK-NEXT: vmov r0, s1
; CHECK-NEXT: vmov.16 q1[6], r0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q1[7], r0
; CHECK-NEXT: vmov.f32 s4, s2
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 4, i32 5, i32 7, i32 6, i32 3, i32 1, i32 2, i32 0>
  ret <8 x half> %out
}

define arm_aapcs_vfpcc <8 x half> @shuffle5_f16(<8 x half> %src) {
; CHECK-LABEL: shuffle5_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev64.16 q1, q0
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
  ret <8 x half> %out
}

define arm_aapcs_vfpcc <8 x half> @shuffle6_f16(<8 x half> %src) {
; CHECK-LABEL: shuffle6_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vrev32.16 q0, q0
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x half> %out
}
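; <2 x double> shuffles mirror the <2 x i64> ones: identity and undef-lane
; masks fold away, and a swap becomes four s-register moves.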
define arm_aapcs_vfpcc <2 x double> @shuffle1_f64(<2 x double> %src) {
; CHECK-LABEL: shuffle1_f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <2 x double> %src, <2 x double> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x double> %out
}

define arm_aapcs_vfpcc <2 x double> @shuffle2_f64(<2 x double> %src) {
; CHECK-LABEL: shuffle2_f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s4, s2
; CHECK-NEXT: vmov.f32 s5, s3
; CHECK-NEXT: vmov.f32 s6, s0
; CHECK-NEXT: vmov.f32 s7, s1
; CHECK-NEXT: vmov q0, q1
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <2 x double> %src, <2 x double> undef, <2 x i32> <i32 1, i32 0>
  ret <2 x double> %out
}

define arm_aapcs_vfpcc <2 x double> @shuffle3_f64(<2 x double> %src) {
; CHECK-LABEL: shuffle3_f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
  %out = shufflevector <2 x double> %src, <2 x double> undef, <2 x i32> <i32 undef, i32 1>
  ret <2 x double> %out
}
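; insertelement into lane 0 of an undef vector picks the lane-width vmov
; variant (vmov.32/.16/.8). An i64 insert needs two 32-bit moves; a float
; insert is already in s0, so only a register-liveness (kill) note is emitted.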
define arm_aapcs_vfpcc <4 x i32> @insert_i32(i32 %a) {
; CHECK-LABEL: insert_i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.32 q0[0], r0
; CHECK-NEXT: bx lr
entry:
  %res = insertelement <4 x i32> undef, i32 %a, i32 0
  ret <4 x i32> %res
}

define arm_aapcs_vfpcc <8 x i16> @insert_i16(i16 %a) {
; CHECK-LABEL: insert_i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.16 q0[0], r0
; CHECK-NEXT: bx lr
entry:
  %res = insertelement <8 x i16> undef, i16 %a, i32 0
  ret <8 x i16> %res
}

define arm_aapcs_vfpcc <16 x i8> @insert_i8(i8 %a) {
; CHECK-LABEL: insert_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.8 q0[0], r0
; CHECK-NEXT: bx lr
entry:
  %res = insertelement <16 x i8> undef, i8 %a, i32 0
  ret <16 x i8> %res
}

define arm_aapcs_vfpcc <2 x i64> @insert_i64(i64 %a) {
; CHECK-LABEL: insert_i64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.32 q0[0], r0
; CHECK-NEXT: vmov.32 q0[1], r1
; CHECK-NEXT: bx lr
entry:
  %res = insertelement <2 x i64> undef, i64 %a, i32 0
  ret <2 x i64> %res
}

define arm_aapcs_vfpcc <4 x float> @insert_f32(float %a) {
; CHECK-LABEL: insert_f32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: @ kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: bx lr
entry:
  %res = insertelement <4 x float> undef, float %a, i32 0
  ret <4 x float> %res
}
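; The half-precision insert below sidesteps the half calling-convention issue
; (see the TODO) by taking a pointer and loading with vldr.16 directly into s0.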
; TODO: Calling convention needs fixing to pass half types directly to functions
define arm_aapcs_vfpcc <8 x half> @insert_f16(half *%aa) {
; CHECK-LABEL: insert_f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vldr.16 s0, [r0]
; CHECK-NEXT: bx lr
entry:
  %a = load half, half* %aa
  %res = insertelement <8 x half> undef, half %a, i32 0
  ret <8 x half> %res
}
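; Inserting a double currently goes through an aligned 16-byte stack slot: the
; scalar is spilled with vstr and reloaded into a q register with vldrw.u32.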
define arm_aapcs_vfpcc <2 x double> @insert_f64(double %a) {
; CHECK-LABEL: insert_f64:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r6, r7, lr}
; CHECK-NEXT: push {r4, r6, r7, lr}
; CHECK-NEXT: .setfp r7, sp, #8
; CHECK-NEXT: add r7, sp, #8
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, #16
; CHECK-NEXT: mov r4, sp
; CHECK-NEXT: bfc r4, #0, #4
; CHECK-NEXT: mov sp, r4
; CHECK-NEXT: sub.w r4, r7, #8
; CHECK-NEXT: vstr d0, [sp]
; CHECK-NEXT: mov r0, sp
; CHECK-NEXT: vldrw.u32 q0, [r0]
; CHECK-NEXT: mov sp, r4
; CHECK-NEXT: pop {r4, r6, r7, pc}
entry:
  %res = insertelement <2 x double> undef, double %a, i32 0
  ret <2 x double> %res
}
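; A shuffle mixing one live i16 lane with constant lanes: the constants are
; materialized from a literal pool (.LCPI38_0), the live lane is inserted with
; vmov.32, and the <4 x i16> result reaches r0/r1 via a truncating stack
; store (vstrh.32) followed by ldrd.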
define arm_aapcs_vfpcc i64 @scalar_to_vector_i32(<8 x i16> %v) {
; CHECK-LABEL: scalar_to_vector_i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .pad #8
; CHECK-NEXT: sub sp, #8
; CHECK-NEXT: adr r1, .LCPI38_0
; CHECK-NEXT: vmov.u16 r0, q0[0]
; CHECK-NEXT: vldrw.u32 q1, [r1]
; CHECK-NEXT: vmov.32 q0[0], r0
; CHECK-NEXT: mov r2, sp
; CHECK-NEXT: vmov.f32 s1, s5
; CHECK-NEXT: vmov.f32 s2, s6
; CHECK-NEXT: vmov.f32 s3, s7
; CHECK-NEXT: vstrh.32 q0, [r2]
; CHECK-NEXT: ldrd r0, r1, [sp], #8
; CHECK-NEXT: bx lr
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI38_0:
; CHECK-NEXT: .zero 4
; CHECK-NEXT: .long 7 @ 0x7
; CHECK-NEXT: .long 1 @ 0x1
; CHECK-NEXT: .long 9 @ 0x9
entry:
  %f = shufflevector <8 x i16> %v, <8 x i16> <i16 undef, i16 7, i16 1, i16 9, i16 undef, i16 undef, i16 undef, i16 undef>, <4 x i32> <i32 0, i32 9, i32 10, i32 11>
  %0 = bitcast <4 x i16> %f to i64
  ret i64 %0
}
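; extractelement tests. Integer lane reads use vmov from the s register or the
; lane-addressed vmov.u16/vmov.u8 forms; an i64 extract moves its two 32-bit
; halves separately.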
define arm_aapcs_vfpcc i32 @extract_i32_0(<4 x i32> %a) {
; CHECK-LABEL: extract_i32_0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <4 x i32> %a, i32 0
  ret i32 %res
}

define arm_aapcs_vfpcc i32 @extract_i32_3(<4 x i32> %a) {
; CHECK-LABEL: extract_i32_3:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov r0, s3
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <4 x i32> %a, i32 3
  ret i32 %res
}

define arm_aapcs_vfpcc i16 @extract_i16_0(<8 x i16> %a) {
; CHECK-LABEL: extract_i16_0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.u16 r0, q0[0]
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <8 x i16> %a, i32 0
  ret i16 %res
}

define arm_aapcs_vfpcc i16 @extract_i16_3(<8 x i16> %a) {
; CHECK-LABEL: extract_i16_3:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.u16 r0, q0[3]
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <8 x i16> %a, i32 3
  ret i16 %res
}

define arm_aapcs_vfpcc i8 @extract_i8_0(<16 x i8> %a) {
; CHECK-LABEL: extract_i8_0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.u8 r0, q0[0]
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <16 x i8> %a, i32 0
  ret i8 %res
}

define arm_aapcs_vfpcc i8 @extract_i8_3(<16 x i8> %a) {
; CHECK-LABEL: extract_i8_3:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.u8 r0, q0[3]
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <16 x i8> %a, i32 3
  ret i8 %res
}

define arm_aapcs_vfpcc i64 @extract_i64_0(<2 x i64> %a) {
; CHECK-LABEL: extract_i64_0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov r1, s1
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <2 x i64> %a, i32 0
  ret i64 %res
}

define arm_aapcs_vfpcc i64 @extract_i64_1(<2 x i64> %a) {
; CHECK-LABEL: extract_i64_1:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov r0, s2
; CHECK-NEXT: vmov r1, s3
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <2 x i64> %a, i32 1
  ret i64 %res
}
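; Floating-point extracts stay in the FP register file: lane 0 is already in
; s0/d0 (only a kill annotation), an odd f16 lane uses vmovx.f16, and the
; high double is two s-register moves.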
define arm_aapcs_vfpcc float @extract_f32_0(<4 x float> %a) {
; CHECK-LABEL: extract_f32_0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: @ kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <4 x float> %a, i32 0
  ret float %res
}

define arm_aapcs_vfpcc float @extract_f32_3(<4 x float> %a) {
; CHECK-LABEL: extract_f32_3:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s0, s3
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <4 x float> %a, i32 3
  ret float %res
}

define arm_aapcs_vfpcc half @extract_f16_0(<8 x half> %a) {
; CHECK-LABEL: extract_f16_0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: @ kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <8 x half> %a, i32 0
  ret half %res
}

define arm_aapcs_vfpcc half @extract_f16_3(<8 x half> %a) {
; CHECK-LABEL: extract_f16_3:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovx.f16 s0, s1
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <8 x half> %a, i32 3
  ret half %res
}

define arm_aapcs_vfpcc double @extract_f64_0(<2 x double> %a) {
; CHECK-LABEL: extract_f64_0:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: @ kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <2 x double> %a, i32 0
  ret double %res
}

define arm_aapcs_vfpcc double @extract_f64_1(<2 x double> %a) {
; CHECK-LABEL: extract_f64_1:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.f32 s0, s2
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: bx lr
entry:
  %res = extractelement <2 x double> %a, i32 1
  ret double %res
}