[ARM] Regenerate popcnt tests

Improve codegen view as part of PR32655

llvm-svn: 344465
This commit is contained in:
Simon Pilgrim 2018-10-13 21:32:49 +00:00
parent 11b6cedb8e
commit 247ea88090
1 changed file with 257 additions and 54 deletions

View File

@@ -1,17 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; Implement ctpop with vcnt
; ctpop on <8 x i8> lowers to a single vcnt.8 on a D register.
; Stale hand-written CHECK lines (duplicating the autogenerated CHECK-LABEL,
; which makes FileCheck expect the label twice) have been removed.
define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
; CHECK-LABEL: vcnt8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcnt.8 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp2
}
define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vcntQ8:
;CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK-LABEL: vcntQ8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcnt.8 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp2
@ -19,11 +29,16 @@ define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
define <4 x i16> @vcnt16(<4 x i16>* %A) nounwind {
; CHECK-LABEL: vcnt16:
; CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vrev16.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vadd.i8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vuzp.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcnt.8 d16, d16
; CHECK-NEXT: vrev16.8 d17, d16
; CHECK-NEXT: vadd.i8 d16, d16, d17
; CHECK-NEXT: vorr d17, d16, d16
; CHECK-NEXT: vuzp.8 d16, d17
; CHECK-NEXT: vmovl.u8 q8, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %tmp1)
ret <4 x i16> %tmp2
@ -31,11 +46,17 @@ define <4 x i16> @vcnt16(<4 x i16>* %A) nounwind {
define <8 x i16> @vcntQ16(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vcntQ16:
; CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vrev16.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vadd.i8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vuzp.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcnt.8 q8, q8
; CHECK-NEXT: vrev16.8 q9, q8
; CHECK-NEXT: vadd.i8 q8, q8, q9
; CHECK-NEXT: vorr q9, q8, q8
; CHECK-NEXT: vuzp.8 q8, q9
; CHECK-NEXT: vmovl.u8 q8, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %tmp1)
ret <8 x i16> %tmp2
@ -43,14 +64,21 @@ define <8 x i16> @vcntQ16(<8 x i16>* %A) nounwind {
define <2 x i32> @vcnt32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vcnt32:
; CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vrev16.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vadd.i8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vuzp.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
; CHECK: vrev32.16 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vuzp.16 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcnt.8 d16, d16
; CHECK-NEXT: vrev16.8 d17, d16
; CHECK-NEXT: vadd.i8 d16, d16, d17
; CHECK-NEXT: vorr d17, d16, d16
; CHECK-NEXT: vuzp.8 d16, d17
; CHECK-NEXT: vmovl.u8 q8, d16
; CHECK-NEXT: vrev32.16 d18, d16
; CHECK-NEXT: vadd.i16 d16, d16, d18
; CHECK-NEXT: vorr d17, d16, d16
; CHECK-NEXT: vuzp.16 d16, d17
; CHECK-NEXT: vmovl.u16 q8, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %tmp1)
ret <2 x i32> %tmp2
@ -58,14 +86,22 @@ define <2 x i32> @vcnt32(<2 x i32>* %A) nounwind {
define <4 x i32> @vcntQ32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vcntQ32:
; CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vrev16.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vadd.i8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vuzp.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
; CHECK: vrev32.16 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vuzp.16 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcnt.8 q8, q8
; CHECK-NEXT: vrev16.8 q9, q8
; CHECK-NEXT: vadd.i8 q8, q8, q9
; CHECK-NEXT: vorr q9, q8, q8
; CHECK-NEXT: vuzp.8 q8, q9
; CHECK-NEXT: vmovl.u8 q9, d16
; CHECK-NEXT: vrev32.16 q9, q9
; CHECK-NEXT: vaddw.u8 q8, q9, d16
; CHECK-NEXT: vorr q9, q8, q8
; CHECK-NEXT: vuzp.16 q8, q9
; CHECK-NEXT: vmovl.u16 q8, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %tmp1)
ret <4 x i32> %tmp2
@ -73,6 +109,51 @@ define <4 x i32> @vcntQ32(<4 x i32>* %A) nounwind {
define <1 x i64> @vcnt64(<1 x i64>* %A) nounwind {
; CHECK-LABEL: vcnt64:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: ldr r2, .LCPI6_0
; CHECK-NEXT: vmov.32 r0, d16[0]
; CHECK-NEXT: ldr r3, .LCPI6_3
; CHECK-NEXT: vmov.32 r1, d16[1]
; CHECK-NEXT: ldr lr, .LCPI6_2
; CHECK-NEXT: ldr r12, .LCPI6_1
; CHECK-NEXT: vldr s1, .LCPI6_4
; CHECK-NEXT: and r4, r2, r0, lsr #1
; CHECK-NEXT: sub r0, r0, r4
; CHECK-NEXT: and r2, r2, r1, lsr #1
; CHECK-NEXT: sub r1, r1, r2
; CHECK-NEXT: and r4, r0, r3
; CHECK-NEXT: and r0, r3, r0, lsr #2
; CHECK-NEXT: and r2, r1, r3
; CHECK-NEXT: add r0, r4, r0
; CHECK-NEXT: and r1, r3, r1, lsr #2
; CHECK-NEXT: add r1, r2, r1
; CHECK-NEXT: add r0, r0, r0, lsr #4
; CHECK-NEXT: and r0, r0, lr
; CHECK-NEXT: add r1, r1, r1, lsr #4
; CHECK-NEXT: mul r2, r0, r12
; CHECK-NEXT: and r0, r1, lr
; CHECK-NEXT: mul r1, r0, r12
; CHECK-NEXT: lsr r0, r2, #24
; CHECK-NEXT: add r0, r0, r1, lsr #24
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov r0, r1, d0
; CHECK-NEXT: pop {r4, lr}
; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI6_0:
; CHECK-NEXT: .long 1431655765 @ 0x55555555
; CHECK-NEXT: .LCPI6_1:
; CHECK-NEXT: .long 16843009 @ 0x1010101
; CHECK-NEXT: .LCPI6_2:
; CHECK-NEXT: .long 252645135 @ 0xf0f0f0f
; CHECK-NEXT: .LCPI6_3:
; CHECK-NEXT: .long 858993459 @ 0x33333333
; CHECK-NEXT: .LCPI6_4:
; CHECK-NEXT: .long 0 @ float 0
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.ctpop.v1i64(<1 x i64> %tmp1)
ret <1 x i64> %tmp2
@ -80,6 +161,74 @@ define <1 x i64> @vcnt64(<1 x i64>* %A) nounwind {
define <2 x i64> @vcntQ64(<2 x i64>* %A) nounwind {
; CHECK-LABEL: vcntQ64:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r6, lr}
; CHECK-NEXT: push {r4, r5, r6, lr}
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vmov.32 r1, d17[1]
; CHECK-NEXT: ldr lr, .LCPI7_0
; CHECK-NEXT: vmov.32 r2, d17[0]
; CHECK-NEXT: ldr r0, .LCPI7_2
; CHECK-NEXT: vmov.32 r3, d16[0]
; CHECK-NEXT: ldr r12, .LCPI7_1
; CHECK-NEXT: ldr r5, .LCPI7_3
; CHECK-NEXT: vldr s3, .LCPI7_4
; CHECK-NEXT: and r4, lr, r1, lsr #1
; CHECK-NEXT: sub r1, r1, r4
; CHECK-NEXT: and r4, r1, r0
; CHECK-NEXT: and r1, r0, r1, lsr #2
; CHECK-NEXT: add r1, r4, r1
; CHECK-NEXT: and r4, lr, r2, lsr #1
; CHECK-NEXT: sub r2, r2, r4
; CHECK-NEXT: and r4, r2, r0
; CHECK-NEXT: add r1, r1, r1, lsr #4
; CHECK-NEXT: and r2, r0, r2, lsr #2
; CHECK-NEXT: and r6, r1, r12
; CHECK-NEXT: add r2, r4, r2
; CHECK-NEXT: and r4, lr, r3, lsr #1
; CHECK-NEXT: sub r3, r3, r4
; CHECK-NEXT: and r4, r3, r0
; CHECK-NEXT: add r2, r2, r2, lsr #4
; CHECK-NEXT: and r3, r0, r3, lsr #2
; CHECK-NEXT: and r2, r2, r12
; CHECK-NEXT: add r3, r4, r3
; CHECK-NEXT: add r3, r3, r3, lsr #4
; CHECK-NEXT: and r3, r3, r12
; CHECK-NEXT: mul r4, r3, r5
; CHECK-NEXT: vmov.32 r3, d16[1]
; CHECK-NEXT: and r1, lr, r3, lsr #1
; CHECK-NEXT: sub r1, r3, r1
; CHECK-NEXT: and r3, r1, r0
; CHECK-NEXT: and r0, r0, r1, lsr #2
; CHECK-NEXT: mul r1, r2, r5
; CHECK-NEXT: add r0, r3, r0
; CHECK-NEXT: mul r2, r6, r5
; CHECK-NEXT: add r0, r0, r0, lsr #4
; CHECK-NEXT: and r0, r0, r12
; CHECK-NEXT: mul r3, r0, r5
; CHECK-NEXT: lsr r0, r1, #24
; CHECK-NEXT: lsr r1, r4, #24
; CHECK-NEXT: add r0, r0, r2, lsr #24
; CHECK-NEXT: vmov s2, r0
; CHECK-NEXT: add r0, r1, r3, lsr #24
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vmov r2, r3, d1
; CHECK-NEXT: vmov r0, r1, d0
; CHECK-NEXT: pop {r4, r5, r6, lr}
; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI7_0:
; CHECK-NEXT: .long 1431655765 @ 0x55555555
; CHECK-NEXT: .LCPI7_1:
; CHECK-NEXT: .long 252645135 @ 0xf0f0f0f
; CHECK-NEXT: .LCPI7_2:
; CHECK-NEXT: .long 858993459 @ 0x33333333
; CHECK-NEXT: .LCPI7_3:
; CHECK-NEXT: .long 16843009 @ 0x1010101
; CHECK-NEXT: .LCPI7_4:
; CHECK-NEXT: .long 0 @ float 0
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp1)
ret <2 x i64> %tmp2
@ -95,48 +244,75 @@ declare <1 x i64> @llvm.ctpop.v1i64(<1 x i64>) nounwind readnone
declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
; ctlz (zero-undef = false) on <8 x i8> lowers to vclz.i8 on a D register.
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
; CHECK-LABEL: vclz8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vclz.i8 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
ret <8 x i8> %tmp2
}
; ctlz on <4 x i16> lowers to vclz.i16 on a D register.
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
; CHECK-LABEL: vclz16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vclz.i16 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
ret <4 x i16> %tmp2
}
; ctlz on <2 x i32> lowers to vclz.i32 on a D register.
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vclz32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vclz.i32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
ret <2 x i32> %tmp2
}
; ctlz on <16 x i8> lowers to vclz.i8 on a Q register (128-bit load + return
; through two D-register moves). Removed stale duplicate CHECK lines.
define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
; CHECK-LABEL: vclzQ8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vclz.i8 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
ret <16 x i8> %tmp2
}
; ctlz on <8 x i16> lowers to vclz.i16 on a Q register.
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vclzQ16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vclz.i16 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
ret <8 x i16> %tmp2
}
define <4 x i32> @vclzQ32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vclzQ32:
;CHECK: vclz.i32 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK-LABEL: vclzQ32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vclz.i32 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
ret <4 x i32> %tmp2
@ -151,48 +327,75 @@ declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) nounwind readnone
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
; ARM-specific count-leading-sign-bits intrinsic lowers to vcls.s8 (D reg).
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
; CHECK-LABEL: vclss8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcls.s8 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp2
}
; vcls on <4 x i16> lowers to vcls.s16 (D reg).
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
; CHECK-LABEL: vclss16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcls.s16 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)
ret <4 x i16> %tmp2
}
; vcls on <2 x i32> lowers to vcls.s32 (D reg).
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vclss32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
; CHECK-NEXT: vcls.s32 d16, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %tmp1)
ret <2 x i32> %tmp2
}
; vcls on <16 x i8> lowers to vcls.s8 on a Q register.
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
; CHECK-LABEL: vclsQs8:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcls.s8 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp2
}
; vcls on <8 x i16> lowers to vcls.s16 on a Q register.
; Removed stale pre-regeneration CHECK lines that duplicated CHECK-LABEL.
define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vclsQs16:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcls.s16 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
ret <8 x i16> %tmp2
}
define <4 x i32> @vclsQs32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vclsQs32:
;CHECK: vcls.s32
; CHECK-LABEL: vclsQs32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
; CHECK-NEXT: vcls.s32 q8, q8
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %tmp1)
ret <4 x i32> %tmp2