[ARM] Replace llvm.experimental.vector.reduce.smax with llvm.vector.reduce.smax. NFC
This fixes up some newer tests after D88787.
commit 81b4f3380b
parent 9b58b0c06e
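
The change is purely mechanical: D88787 dropped the "experimental" prefix from the vector-reduction intrinsics, so each test below swaps a call of the form (pair taken verbatim from the diff that follows)

  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)

for the equivalent

  %x = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %vec)

and renames the matching declare lines the same way. The generated code, and therefore every CHECK line, is untouched, which is why the commit is tagged NFC.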
@@ -7,7 +7,7 @@ define arm_aapcs_vfpcc zeroext i8 @uminv16i8(<16 x i8> %vec, i8 zeroext %min) {
 ; CHECK-NEXT: vminv.u8 r0, q0
 ; CHECK-NEXT: uxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %vec)
   %cmp = icmp ult i8 %x, %min
   %1 = select i1 %cmp, i8 %x, i8 %min
   ret i8 %1
@@ -19,7 +19,7 @@ define arm_aapcs_vfpcc zeroext i16 @uminv8i16(<8 x i16> %vec, i16 zeroext %min)
 ; CHECK-NEXT: vminv.u16 r0, q0
 ; CHECK-NEXT: uxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %vec)
   %cmp = icmp ult i16 %x, %min
   %1 = select i1 %cmp, i16 %x, i16 %min
   ret i16 %1
@@ -30,7 +30,7 @@ define arm_aapcs_vfpcc i32 @uminv4i32(<4 x i32> %vec, i32 %min) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vminv.u32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %vec)
   %cmp = icmp ult i32 %x, %min
   %1 = select i1 %cmp, i32 %x, i32 %min
   ret i32 %1
@@ -42,7 +42,7 @@ define arm_aapcs_vfpcc signext i8 @sminv16i8(<16 x i8> %vec, i8 signext %min) {
 ; CHECK-NEXT: vminv.s8 r0, q0
 ; CHECK-NEXT: sxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %vec)
   %cmp = icmp slt i8 %x, %min
   %1 = select i1 %cmp, i8 %x, i8 %min
   ret i8 %1
@@ -54,7 +54,7 @@ define arm_aapcs_vfpcc signext i16 @sminv8i16(<8 x i16> %vec, i16 signext %min)
 ; CHECK-NEXT: vminv.s16 r0, q0
 ; CHECK-NEXT: sxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %vec)
   %cmp = icmp slt i16 %x, %min
   %1 = select i1 %cmp, i16 %x, i16 %min
   ret i16 %1
@@ -65,7 +65,7 @@ define arm_aapcs_vfpcc i32 @sminv4i32(<4 x i32> %vec, i32 %min) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vminv.s32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %vec)
   %cmp = icmp slt i32 %x, %min
   %1 = select i1 %cmp, i32 %x, i32 %min
   ret i32 %1
@@ -77,7 +77,7 @@ define arm_aapcs_vfpcc zeroext i8 @umaxv16i8(<16 x i8> %vec, i8 zeroext %max) {
 ; CHECK-NEXT: vmaxv.u8 r0, q0
 ; CHECK-NEXT: uxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %vec)
   %cmp = icmp ugt i8 %x, %max
   %1 = select i1 %cmp, i8 %x, i8 %max
   ret i8 %1
@@ -89,7 +89,7 @@ define arm_aapcs_vfpcc zeroext i16 @umaxv8i16(<8 x i16> %vec, i16 zeroext %max)
 ; CHECK-NEXT: vmaxv.u16 r0, q0
 ; CHECK-NEXT: uxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %vec)
   %cmp = icmp ugt i16 %x, %max
   %1 = select i1 %cmp, i16 %x, i16 %max
   ret i16 %1
@@ -100,7 +100,7 @@ define arm_aapcs_vfpcc i32 @umaxv4i32(<4 x i32> %vec, i32 %max) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vmaxv.u32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %vec)
   %cmp = icmp ugt i32 %x, %max
   %1 = select i1 %cmp, i32 %x, i32 %max
   ret i32 %1
@@ -112,7 +112,7 @@ define arm_aapcs_vfpcc signext i8 @smaxv16i8(<16 x i8> %vec, i8 signext %max) {
 ; CHECK-NEXT: vmaxv.s8 r0, q0
 ; CHECK-NEXT: sxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %vec)
   %cmp = icmp sgt i8 %x, %max
   %1 = select i1 %cmp, i8 %x, i8 %max
   ret i8 %1
@@ -124,7 +124,7 @@ define arm_aapcs_vfpcc signext i16 @smaxv8i16(<8 x i16> %vec, i16 signext %max)
 ; CHECK-NEXT: vmaxv.s16 r0, q0
 ; CHECK-NEXT: sxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %vec)
   %cmp = icmp sgt i16 %x, %max
   %1 = select i1 %cmp, i16 %x, i16 %max
   ret i16 %1
@@ -135,7 +135,7 @@ define arm_aapcs_vfpcc i32 @smaxv4i32(<4 x i32> %vec, i32 %max) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vmaxv.s32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %vec)
   %cmp = icmp sgt i32 %x, %max
   %1 = select i1 %cmp, i32 %x, i32 %max
   ret i32 %1
@@ -147,7 +147,7 @@ define arm_aapcs_vfpcc zeroext i8 @commute_uminv16i8(<16 x i8> %vec, i8 zeroext
 ; CHECK-NEXT: vminv.u8 r0, q0
 ; CHECK-NEXT: uxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %vec)
   %cmp = icmp ult i8 %min, %x
   %1 = select i1 %cmp, i8 %min, i8 %x
   ret i8 %1
@@ -159,7 +159,7 @@ define arm_aapcs_vfpcc zeroext i16 @commute_uminv8i16(<8 x i16> %vec, i16 zeroex
 ; CHECK-NEXT: vminv.u16 r0, q0
 ; CHECK-NEXT: uxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %vec)
   %cmp = icmp ult i16 %min, %x
   %1 = select i1 %cmp, i16 %min, i16 %x
   ret i16 %1
@@ -170,7 +170,7 @@ define arm_aapcs_vfpcc i32 @commute_uminv4i32(<4 x i32> %vec, i32 %min) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vminv.u32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %vec)
   %cmp = icmp ult i32 %min, %x
   %1 = select i1 %cmp, i32 %min, i32 %x
   ret i32 %1
@@ -182,7 +182,7 @@ define arm_aapcs_vfpcc signext i8 @commute_sminv16i8(<16 x i8> %vec, i8 signext
 ; CHECK-NEXT: vminv.s8 r0, q0
 ; CHECK-NEXT: sxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %vec)
   %cmp = icmp slt i8 %min, %x
   %1 = select i1 %cmp, i8 %min, i8 %x
   ret i8 %1
@@ -194,7 +194,7 @@ define arm_aapcs_vfpcc signext i16 @commute_sminv8i16(<8 x i16> %vec, i16 signex
 ; CHECK-NEXT: vminv.s16 r0, q0
 ; CHECK-NEXT: sxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %vec)
   %cmp = icmp slt i16 %min, %x
   %1 = select i1 %cmp, i16 %min, i16 %x
   ret i16 %1
@@ -205,7 +205,7 @@ define arm_aapcs_vfpcc i32 @commute_sminv4i32(<4 x i32> %vec, i32 %min) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vminv.s32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %vec)
   %cmp = icmp slt i32 %min, %x
   %1 = select i1 %cmp, i32 %min, i32 %x
   ret i32 %1
@@ -217,7 +217,7 @@ define arm_aapcs_vfpcc zeroext i8 @commute_umaxv16i8(<16 x i8> %vec, i8 zeroext
 ; CHECK-NEXT: vmaxv.u8 r0, q0
 ; CHECK-NEXT: uxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %vec)
   %cmp = icmp ugt i8 %max, %x
   %1 = select i1 %cmp, i8 %max, i8 %x
   ret i8 %1
@@ -229,7 +229,7 @@ define arm_aapcs_vfpcc zeroext i16 @commute_umaxv8i16(<8 x i16> %vec, i16 zeroex
 ; CHECK-NEXT: vmaxv.u16 r0, q0
 ; CHECK-NEXT: uxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %vec)
   %cmp = icmp ugt i16 %max, %x
   %1 = select i1 %cmp, i16 %max, i16 %x
   ret i16 %1
@@ -240,7 +240,7 @@ define arm_aapcs_vfpcc i32 @commute_umaxv4i32(<4 x i32> %vec, i32 %max) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vmaxv.u32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %vec)
   %cmp = icmp ugt i32 %max, %x
   %1 = select i1 %cmp, i32 %max, i32 %x
   ret i32 %1
@@ -252,7 +252,7 @@ define arm_aapcs_vfpcc signext i8 @commute_smaxv16i8(<16 x i8> %vec, i8 signext
 ; CHECK-NEXT: vmaxv.s8 r0, q0
 ; CHECK-NEXT: sxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %vec)
   %cmp = icmp sgt i8 %max, %x
   %1 = select i1 %cmp, i8 %max, i8 %x
   ret i8 %1
@@ -264,7 +264,7 @@ define arm_aapcs_vfpcc signext i16 @commute_smaxv8i16(<8 x i16> %vec, i16 signex
 ; CHECK-NEXT: vmaxv.s16 r0, q0
 ; CHECK-NEXT: sxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %vec)
   %cmp = icmp sgt i16 %max, %x
   %1 = select i1 %cmp, i16 %max, i16 %x
   ret i16 %1
@@ -275,7 +275,7 @@ define arm_aapcs_vfpcc i32 @commute_smaxv4i32(<4 x i32> %vec, i32 %max) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vmaxv.s32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %vec)
   %cmp = icmp sgt i32 %max, %x
   %1 = select i1 %cmp, i32 %max, i32 %x
   ret i32 %1
@@ -291,7 +291,7 @@ define arm_aapcs_vfpcc signext i8 @mismatch_smaxv16i8(<16 x i8> %vec, i8 signext
 ; CHECK-NEXT: csel r0, r0, r1, gt
 ; CHECK-NEXT: sxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %vec)
   %cmp = icmp sgt i8 %x, %max
   %1 = select i1 %cmp, i8 %max, i8 %x
   ret i8 %1
@@ -307,7 +307,7 @@ define arm_aapcs_vfpcc signext i8 @mismatch2_smaxv16i8(<16 x i8> %vec, i8 signex
 ; CHECK-NEXT: csel r0, r1, r0, gt
 ; CHECK-NEXT: sxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %vec)
   %cmp = icmp sgt i8 %max, %x
   %1 = select i1 %cmp, i8 %x, i8 %max
   ret i8 %1
@@ -319,7 +319,7 @@ define arm_aapcs_vfpcc zeroext i8 @inverted_uminv16i8(<16 x i8> %vec, i8 zeroext
 ; CHECK-NEXT: vminv.u8 r0, q0
 ; CHECK-NEXT: uxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %vec)
   %cmp = icmp ugt i8 %x, %min
   %1 = select i1 %cmp, i8 %min, i8 %x
   ret i8 %1
@@ -331,7 +331,7 @@ define arm_aapcs_vfpcc zeroext i16 @inverted_uminv8i16(<8 x i16> %vec, i16 zeroe
 ; CHECK-NEXT: vminv.u16 r0, q0
 ; CHECK-NEXT: uxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %vec)
   %cmp = icmp ugt i16 %x, %min
   %1 = select i1 %cmp, i16 %min, i16 %x
   ret i16 %1
@@ -342,7 +342,7 @@ define arm_aapcs_vfpcc i32 @inverted_uminv4i32(<4 x i32> %vec, i32 %min) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vminv.u32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %vec)
   %cmp = icmp ugt i32 %x, %min
   %1 = select i1 %cmp, i32 %min, i32 %x
   ret i32 %1
@@ -354,7 +354,7 @@ define arm_aapcs_vfpcc signext i8 @inverted_sminv16i8(<16 x i8> %vec, i8 signext
 ; CHECK-NEXT: vminv.s8 r0, q0
 ; CHECK-NEXT: sxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %vec)
   %cmp = icmp sgt i8 %x, %min
   %1 = select i1 %cmp, i8 %min, i8 %x
   ret i8 %1
@@ -366,7 +366,7 @@ define arm_aapcs_vfpcc signext i16 @inverted_sminv8i16(<8 x i16> %vec, i16 signe
 ; CHECK-NEXT: vminv.s16 r0, q0
 ; CHECK-NEXT: sxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %vec)
   %cmp = icmp sgt i16 %x, %min
   %1 = select i1 %cmp, i16 %min, i16 %x
   ret i16 %1
@@ -377,7 +377,7 @@ define arm_aapcs_vfpcc i32 @inverted_sminv4i32(<4 x i32> %vec, i32 %min) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vminv.s32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %vec)
   %cmp = icmp sgt i32 %x, %min
   %1 = select i1 %cmp, i32 %min, i32 %x
   ret i32 %1
@@ -389,7 +389,7 @@ define arm_aapcs_vfpcc zeroext i8 @inverted_umaxv16i8(<16 x i8> %vec, i8 zeroext
 ; CHECK-NEXT: vmaxv.u8 r0, q0
 ; CHECK-NEXT: uxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %vec)
   %cmp = icmp ult i8 %x, %max
   %1 = select i1 %cmp, i8 %max, i8 %x
   ret i8 %1
@@ -401,7 +401,7 @@ define arm_aapcs_vfpcc zeroext i16 @inverted_umaxv8i16(<8 x i16> %vec, i16 zeroe
 ; CHECK-NEXT: vmaxv.u16 r0, q0
 ; CHECK-NEXT: uxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %vec)
   %cmp = icmp ult i16 %x, %max
   %1 = select i1 %cmp, i16 %max, i16 %x
   ret i16 %1
@@ -412,7 +412,7 @@ define arm_aapcs_vfpcc i32 @inverted_umaxv4i32(<4 x i32> %vec, i32 %max) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vmaxv.u32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %vec)
   %cmp = icmp ult i32 %x, %max
   %1 = select i1 %cmp, i32 %max, i32 %x
   ret i32 %1
@@ -424,7 +424,7 @@ define arm_aapcs_vfpcc signext i8 @inverted_smaxv16i8(<16 x i8> %vec, i8 signext
 ; CHECK-NEXT: vmaxv.s8 r0, q0
 ; CHECK-NEXT: sxtb r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %x = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %vec)
   %cmp = icmp slt i8 %x, %max
   %1 = select i1 %cmp, i8 %max, i8 %x
   ret i8 %1
@@ -436,7 +436,7 @@ define arm_aapcs_vfpcc signext i16 @inverted_smaxv8i16(<8 x i16> %vec, i16 signe
 ; CHECK-NEXT: vmaxv.s16 r0, q0
 ; CHECK-NEXT: sxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %vec)
   %cmp = icmp slt i16 %x, %max
   %1 = select i1 %cmp, i16 %max, i16 %x
   ret i16 %1
@@ -447,7 +447,7 @@ define arm_aapcs_vfpcc i32 @inverted_smaxv4i32(<4 x i32> %vec, i32 %max) {
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: vmaxv.s32 r0, q0
 ; CHECK-NEXT: bx lr
-  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %x = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %vec)
   %cmp = icmp slt i32 %x, %max
   %1 = select i1 %cmp, i32 %max, i32 %x
   ret i32 %1
@@ -464,7 +464,7 @@ define arm_aapcs_vfpcc signext i16 @trunc_and_sext(<8 x i16> %vec, i32 %max) #1
 ; CHECK-NEXT: csel r0, r0, r1, gt
 ; CHECK-NEXT: sxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %vec)
   %xs = sext i16 %x to i32
   %cmp = icmp sgt i32 %max, %xs
   %mt = trunc i32 %max to i16
@@ -482,7 +482,7 @@ define arm_aapcs_vfpcc signext i16 @trunc_and_zext(<8 x i16> %vec, i32 %max) #1
 ; CHECK-NEXT: csel r0, r0, r1, gt
 ; CHECK-NEXT: sxth r0, r0
 ; CHECK-NEXT: bx lr
-  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %x = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %vec)
   %xs = zext i16 %x to i32
   %cmp = icmp sgt i32 %max, %xs
   %mt = trunc i32 %max to i16
@@ -515,7 +515,7 @@ define arm_aapcs_vfpcc i64 @uminv2i64(<2 x i64> %vec, i64 %min) {
 ; CHECK-NEXT: csel r0, r5, r0, ne
 ; CHECK-NEXT: csel r1, r3, r1, ne
 ; CHECK-NEXT: pop {r4, r5, r7, pc}
-  %x = call i64 @llvm.experimental.vector.reduce.umin.v2i64(<2 x i64> %vec)
+  %x = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %vec)
   %cmp = icmp ult i64 %x, %min
   %1 = select i1 %cmp, i64 %x, i64 %min
   ret i64 %1
@@ -546,7 +546,7 @@ define arm_aapcs_vfpcc i64 @sminv2i64(<2 x i64> %vec, i64 %min) {
 ; CHECK-NEXT: csel r0, r5, r0, ne
 ; CHECK-NEXT: csel r1, r3, r1, ne
 ; CHECK-NEXT: pop {r4, r5, r7, pc}
-  %x = call i64 @llvm.experimental.vector.reduce.smin.v2i64(<2 x i64> %vec)
+  %x = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %vec)
   %cmp = icmp slt i64 %x, %min
   %1 = select i1 %cmp, i64 %x, i64 %min
   ret i64 %1
@@ -577,7 +577,7 @@ define arm_aapcs_vfpcc i64 @umaxv2i64(<2 x i64> %vec, i64 %max) {
 ; CHECK-NEXT: csel r0, r5, r0, ne
 ; CHECK-NEXT: csel r1, r3, r1, ne
 ; CHECK-NEXT: pop {r4, r5, r7, pc}
-  %x = call i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64> %vec)
+  %x = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %vec)
   %cmp = icmp ugt i64 %x, %max
   %1 = select i1 %cmp, i64 %x, i64 %max
   ret i64 %1
@@ -608,40 +608,40 @@ define arm_aapcs_vfpcc i64 @smaxv2i64(<2 x i64> %vec, i64 %max) {
 ; CHECK-NEXT: csel r0, r5, r0, ne
 ; CHECK-NEXT: csel r1, r3, r1, ne
 ; CHECK-NEXT: pop {r4, r5, r7, pc}
-  %x = call i64 @llvm.experimental.vector.reduce.smax.v2i64(<2 x i64> %vec)
+  %x = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %vec)
   %cmp = icmp sgt i64 %x, %max
   %1 = select i1 %cmp, i64 %x, i64 %max
   ret i64 %1
 }

-declare i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>)

-declare i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>)

-declare i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)

-declare i64 @llvm.experimental.vector.reduce.umin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>)

-declare i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>)

-declare i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>)

-declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)

-declare i64 @llvm.experimental.vector.reduce.smin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>)

-declare i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)

-declare i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)

-declare i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)

-declare i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>)

-declare i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)

-declare i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)

-declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)

-declare i64 @llvm.experimental.vector.reduce.smax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>)