; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

;
; VECTOR_SPLICE (index)
;
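
; Informal summary of the lowering exercised below: for a constant non-negative
; index, the splice concatenates the two inputs and extracts a full-length
; vector starting at that element. When the index is known to fit within the
; first register this maps onto a single EXT whose immediate is the byte offset
; index * element-size (e.g. nxv8i16 index 7 becomes ext #14).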

define <vscale x 16 x i8> @splice_nxv16i8_first_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv16i8_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 0)
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @splice_nxv16i8_last_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv16i8_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #15
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 15)
  ret <vscale x 16 x i8> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
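; When the index may be greater than or equal to the runtime element count, both
; inputs are stored to a contiguous 2*VL stack slot and the result is reloaded
; from sp plus min(index, VL-1) elements: below, rdvl/sub computes VL-1 in bytes
; and csel selects the smaller of that and the requested index of 16.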
define <vscale x 16 x i8> @splice_nxv16i8_clamped_idx(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv16i8_clamped_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: sub x9, x9, #1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #16
; CHECK-NEXT: cmp x9, #16
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x8, x9]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 16)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @splice_nxv8i16_first_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: splice_nxv8i16_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 0)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x i16> @splice_nxv8i16_last_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: splice_nxv8i16_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #14
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 7)
  ret <vscale x 8 x i16> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 8 x i16> @splice_nxv8i16_clamped_idx(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: splice_nxv8i16_clamped_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cnth x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #8
; CHECK-NEXT: cmp x10, #8
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 8)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @splice_nxv4i32_first_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv4i32_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 0)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @splice_nxv4i32_last_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv4i32_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #12
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 3)
  ret <vscale x 4 x i32> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 4 x i32> @splice_nxv4i32_clamped_idx(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv4i32_clamped_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntw x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x10, #4
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 4)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @splice_nxv2i64_first_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: splice_nxv2i64_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 0)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @splice_nxv2i64_last_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: splice_nxv2i64_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #8
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 1)
  ret <vscale x 2 x i64> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 2 x i64> @splice_nxv2i64_clamped_idx(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: splice_nxv2i64_clamped_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntd x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x10, #2
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, x9, lsl #3]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 2)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x half> @splice_nxv2f16_neg_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
; CHECK-LABEL: splice_nxv2f16_neg_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: lastb d0, p0, z0.d
; CHECK-NEXT: insr z1.d, d0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -1)
  ret <vscale x 2 x half> %res
}

define <vscale x 2 x half> @splice_nxv2f16_neg2_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
; CHECK-LABEL: splice_nxv2f16_neg2_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-8
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -2)
  ret <vscale x 2 x half> %res
}

define <vscale x 2 x half> @splice_nxv2f16_first_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
; CHECK-LABEL: splice_nxv2f16_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 0)
  ret <vscale x 2 x half> %res
}

define <vscale x 2 x half> @splice_nxv2f16_1_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
; CHECK-LABEL: splice_nxv2f16_1_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #8
; CHECK-NEXT: ret
  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 1)
  ret <vscale x 2 x half> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 2 x half> @splice_nxv2f16_last_idx(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
; CHECK-LABEL: splice_nxv2f16_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntd x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x10, #2
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ptrue p1.b
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: lsl x9, x9, #3
; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 2)
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x half> @splice_nxv4f16_neg_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
; CHECK-LABEL: splice_nxv4f16_neg_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: lastb s0, p0, z0.s
; CHECK-NEXT: insr z1.s, s0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -1)
  ret <vscale x 4 x half> %res
}

define <vscale x 4 x half> @splice_nxv4f16_neg3_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
; CHECK-LABEL: splice_nxv4f16_neg3_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-6
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -3)
  ret <vscale x 4 x half> %res
}

define <vscale x 4 x half> @splice_nxv4f16_first_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
; CHECK-LABEL: splice_nxv4f16_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 0)
  ret <vscale x 4 x half> %res
}

define <vscale x 4 x half> @splice_nxv4f16_3_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
; CHECK-LABEL: splice_nxv4f16_3_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #12
; CHECK-NEXT: ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 3)
  ret <vscale x 4 x half> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 4 x half> @splice_nxv4f16_last_idx(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
; CHECK-LABEL: splice_nxv4f16_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntw x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x10, #4
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ptrue p1.b
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: lsl x9, x9, #2
; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 4)
  ret <vscale x 4 x half> %res
}

define <vscale x 8 x half> @splice_nxv8f16_first_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: splice_nxv8f16_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 0)
  ret <vscale x 8 x half> %res
}

define <vscale x 8 x half> @splice_nxv8f16_last_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: splice_nxv8f16_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #14
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 7)
  ret <vscale x 8 x half> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 8 x half> @splice_nxv8f16_clamped_idx(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: splice_nxv8f16_clamped_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cnth x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #8
; CHECK-NEXT: cmp x10, #8
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 8)
  ret <vscale x 8 x half> %res
}

define <vscale x 2 x float> @splice_nxv2f32_neg_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
; CHECK-LABEL: splice_nxv2f32_neg_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: lastb d0, p0, z0.d
; CHECK-NEXT: insr z1.d, d0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -1)
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x float> @splice_nxv2f32_neg2_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
; CHECK-LABEL: splice_nxv2f32_neg2_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-4
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -2)
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x float> @splice_nxv2f32_first_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
; CHECK-LABEL: splice_nxv2f32_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 0)
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x float> @splice_nxv2f32_1_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
; CHECK-LABEL: splice_nxv2f32_1_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #8
; CHECK-NEXT: ret
  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 1)
  ret <vscale x 2 x float> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 2 x float> @splice_nxv2f32_last_idx(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
; CHECK-LABEL: splice_nxv2f32_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntd x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x10, #2
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ptrue p1.b
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: lsl x9, x9, #3
; CHECK-NEXT: ld1b { z0.b }, p1/z, [x8, x9]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 2)
  ret <vscale x 2 x float> %res
}

define <vscale x 4 x float> @splice_nxv4f32_first_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: splice_nxv4f32_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 0)
  ret <vscale x 4 x float> %res
}

define <vscale x 4 x float> @splice_nxv4f32_last_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: splice_nxv4f32_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #12
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 3)
  ret <vscale x 4 x float> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 4 x float> @splice_nxv4f32_clamped_idx(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: splice_nxv4f32_clamped_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntw x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x10, #4
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 4)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x double> @splice_nxv2f64_first_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: splice_nxv2f64_first_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 0)
  ret <vscale x 2 x double> %res
}

define <vscale x 2 x double> @splice_nxv2f64_last_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: splice_nxv2f64_last_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #8
; CHECK-NEXT: ret
  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 1)
  ret <vscale x 2 x double> %res
}

; Ensure index is clamped when we cannot prove it's less than VL-1.
define <vscale x 2 x double> @splice_nxv2f64_clamped_idx(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: splice_nxv2f64_clamped_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntd x10
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x10, #2
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, x9, lsl #3]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 2)
  ret <vscale x 2 x double> %res
}

; Ensure predicate based splice is promoted to use ZPRs.
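; There is no direct splice for predicates, so the inputs are first materialised
; as 0/1 data vectors (predicated mov of #1), spliced with EXT like any other
; vector, and then narrowed back to a predicate with a compare against zero.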
define <vscale x 2 x i1> @splice_nxv2i1_idx(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.d, p0/z, #1 // =0x1
; CHECK-NEXT: ext z1.b, z1.b, z0.b, #8
; CHECK-NEXT: and z1.d, z1.d, #0x1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 1)
  ret <vscale x 2 x i1> %res
}

; Ensure predicate based splice is promoted to use ZPRs.
define <vscale x 4 x i1> @splice_nxv4i1_idx(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.s, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.s, p0/z, #1 // =0x1
; CHECK-NEXT: ext z1.b, z1.b, z0.b, #8
; CHECK-NEXT: and z1.s, z1.s, #0x1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 2)
  ret <vscale x 4 x i1> %res
}

; Ensure predicate based splice is promoted to use ZPRs.
define <vscale x 8 x i1> @splice_nxv8i1_idx(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.h, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.h, p0/z, #1 // =0x1
; CHECK-NEXT: ext z1.b, z1.b, z0.b, #8
; CHECK-NEXT: and z1.h, z1.h, #0x1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: cmpne p0.h, p0/z, z1.h, #0
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 4)
  ret <vscale x 8 x i1> %res
}

; Ensure predicate based splice is promoted to use ZPRs.
define <vscale x 16 x i1> @splice_nxv16i1_idx(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
; CHECK-NEXT: ext z1.b, z1.b, z0.b, #8
; CHECK-NEXT: and z1.b, z1.b, #0x1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: cmpne p0.b, p0/z, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 8)
  ret <vscale x 16 x i1> %res
}

; Verify promote type legalisation works as expected.
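; nxv2i8 is promoted to a wider element type (the 64-bit containers used for
; 2-element SVE vectors), so an index of 1 becomes a byte offset of 8 in the EXT
; below.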
define <vscale x 2 x i8> @splice_nxv2i8_idx(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv2i8_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: ext z0.b, z0.b, z1.b, #8
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 1)
  ret <vscale x 2 x i8> %res
}

; Verify splitvec type legalisation works as expected.
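; nxv8i32 is wider than a single SVE register, so it is split into two nxv4i32
; halves (z0/z1 and z2/z3); all four registers are stored to a contiguous stack
; slot and the result is reloaded as two registers starting at the element
; offset (the orr #0x8 forms the 8-byte offset, relying on the slot's alignment).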
define <vscale x 8 x i32> @splice_nxv8i32_idx(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv8i32_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z3.s }, p0, [x8, #3, mul vl]
; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl]
; CHECK-NEXT: orr x8, x8, #0x8
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x8, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 2)
  ret <vscale x 8 x i32> %res
}

; Verify splitvec type legalisation works as expected.
define <vscale x 16 x float> @splice_nxv16f32_clamped_idx(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
; CHECK-LABEL: splice_nxv16f32_clamped_idx:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-8
; CHECK-NEXT: rdvl x10, #1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #16
; CHECK-NEXT: cmp x10, #16
; CHECK-NEXT: st1w { z3.s }, p0, [x8, #3, mul vl]
; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z7.s }, p0, [x8, #7, mul vl]
; CHECK-NEXT: st1w { z4.s }, p0, [x8, #4, mul vl]
; CHECK-NEXT: st1w { z5.s }, p0, [x8, #5, mul vl]
; CHECK-NEXT: st1w { z6.s }, p0, [x8, #6, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
; CHECK-NEXT: add x8, x8, x9, lsl #2
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x8, #1, mul vl]
; CHECK-NEXT: ld1w { z2.s }, p0/z, [x8, #2, mul vl]
; CHECK-NEXT: ld1w { z3.s }, p0/z, [x8, #3, mul vl]
; CHECK-NEXT: addvl sp, sp, #8
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 16)
  ret <vscale x 16 x float> %res
}

;
; VECTOR_SPLICE (trailing elements)
;
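
; For a negative index -N the splice takes the last N elements of the first
; operand followed by the first VL-N elements of the second. N == 1 lowers to
; LASTB + INSR; other constant counts use a stack-based lowering like the
; clamped cases above.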

define <vscale x 16 x i8> @splice_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv16i8:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-16
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x8, x9]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -16)
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @splice_nxv16i8_1(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv16i8_1:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: lastb b0, p0, z0.b
; CHECK-NEXT: insr z1.b, b0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -1)
  ret <vscale x 16 x i8> %res
}

; Ensure number of trailing elements is clamped when we cannot prove it's less than VL.
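; Here the trailing-element count is clamped to VL (the whole first vector):
; csel computes min(VL, 17) bytes and the load starts that many bytes before the
; end of the first operand's stack copy.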
define <vscale x 16 x i8> @splice_nxv16i8_clamped(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv16i8_clamped:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #17
; CHECK-NEXT: cmp x9, #17
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x8]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -17)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @splice_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: splice_nxv8i16:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-8
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -8)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x i16> @splice_nxv8i16_1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: splice_nxv8i16_1:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: lastb h0, p0, z0.h
; CHECK-NEXT: insr z1.h, h0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -1)
  ret <vscale x 8 x i16> %res
}

; Ensure number of trailing elements is clamped when we cannot prove it's less than VL.
define <vscale x 8 x i16> @splice_nxv8i16_clamped(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: splice_nxv8i16_clamped:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #18
; CHECK-NEXT: cmp x9, #18
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -9)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @splice_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-4
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -4)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @splice_nxv4i32_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv4i32_1:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: lastb s0, p0, z0.s
; CHECK-NEXT: insr z1.s, s0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -1)
  ret <vscale x 4 x i32> %res
}

; Ensure number of trailing elements is clamped when we cannot prove it's less than VL.
define <vscale x 4 x i32> @splice_nxv4i32_clamped(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv4i32_clamped:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #20
; CHECK-NEXT: cmp x9, #20
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -5)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @splice_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: splice_nxv2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, x9, lsl #3]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -2)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @splice_nxv2i64_1(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: splice_nxv2i64_1:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: lastb d0, p0, z0.d
; CHECK-NEXT: insr z1.d, d0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -1)
  ret <vscale x 2 x i64> %res
}

; Ensure number of trailing elements is clamped when we cannot prove it's less than VL.
define <vscale x 2 x i64> @splice_nxv2i64_clamped(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: splice_nxv2i64_clamped:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #24
; CHECK-NEXT: cmp x9, #24
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -3)
  ret <vscale x 2 x i64> %res
}

define <vscale x 8 x half> @splice_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: splice_nxv8f16:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-8
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8, x9, lsl #1]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -8)
  ret <vscale x 8 x half> %res
}

define <vscale x 8 x half> @splice_nxv8f16_1(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: splice_nxv8f16_1:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: lastb h0, p0, z0.h
; CHECK-NEXT: insr z1.h, h0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -1)
  ret <vscale x 8 x half> %res
}

; Ensure number of trailing elements is clamped when we cannot prove it's less than VL.
define <vscale x 8 x half> @splice_nxv8f16_clamped(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: splice_nxv8f16_clamped:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #18
; CHECK-NEXT: cmp x9, #18
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -9)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x float> @splice_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: splice_nxv4f32:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-4
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -4)
  ret <vscale x 4 x float> %res
}

define <vscale x 4 x float> @splice_nxv4f32_1(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: splice_nxv4f32_1:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: lastb s0, p0, z0.s
; CHECK-NEXT: insr z1.s, s0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -1)
  ret <vscale x 4 x float> %res
}

; Ensure number of trailing elements is clamped when we cannot prove it's less than VL.
define <vscale x 4 x float> @splice_nxv4f32_clamped(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: splice_nxv4f32_clamped:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #20
; CHECK-NEXT: cmp x9, #20
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -5)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x double> @splice_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: splice_nxv2f64:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, x9, lsl #3]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -2)
  ret <vscale x 2 x double> %res
}

define <vscale x 2 x double> @splice_nxv2f64_1(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: splice_nxv2f64_1:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: lastb d0, p0, z0.d
; CHECK-NEXT: insr z1.d, d0
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -1)
  ret <vscale x 2 x double> %res
}

; Ensure number of trailing elements is clamped when we cannot prove it's less than VL.
define <vscale x 2 x double> @splice_nxv2f64_clamped(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: splice_nxv2f64_clamped:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #24
; CHECK-NEXT: cmp x9, #24
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -3)
  ret <vscale x 2 x double> %res
}

; Ensure predicate based splice is promoted to use ZPRs.
define <vscale x 2 x i1> @splice_nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.d, p0/z, #1 // =0x1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: lastb d0, p0, z0.d
; CHECK-NEXT: mov z1.d, p1/z, #1 // =0x1
; CHECK-NEXT: insr z1.d, d0
; CHECK-NEXT: and z1.d, z1.d, #0x1
; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 -1)
  ret <vscale x 2 x i1> %res
}

; Ensure predicate based splice is promoted to use ZPRs.
define <vscale x 4 x i1> @splice_nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: lastb s0, p0, z0.s
; CHECK-NEXT: mov z1.s, p1/z, #1 // =0x1
; CHECK-NEXT: insr z1.s, s0
; CHECK-NEXT: and z1.s, z1.s, #0x1
; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 -1)
  ret <vscale x 4 x i1> %res
}

; Ensure predicate based splice is promoted to use ZPRs.
define <vscale x 8 x i1> @splice_nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.h, p0/z, #1 // =0x1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: lastb h0, p0, z0.h
; CHECK-NEXT: mov z1.h, p1/z, #1 // =0x1
; CHECK-NEXT: insr z1.h, h0
; CHECK-NEXT: and z1.h, z1.h, #0x1
; CHECK-NEXT: cmpne p0.h, p0/z, z1.h, #0
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 -1)
  ret <vscale x 8 x i1> %res
}

; Ensure predicate based splice is promoted to use ZPRs.
define <vscale x 16 x i1> @splice_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: lastb b0, p0, z0.b
; CHECK-NEXT: mov z1.b, p1/z, #1 // =0x1
; CHECK-NEXT: insr z1.b, b0
; CHECK-NEXT: and z1.b, z1.b, #0x1
; CHECK-NEXT: cmpne p0.b, p0/z, z1.b, #0
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 -1)
  ret <vscale x 16 x i1> %res
}

; Verify promote type legalisation works as expected.
define <vscale x 2 x i8> @splice_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
; CHECK-LABEL: splice_nxv2i8:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: mov x9, #-2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8, x9, lsl #3]
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -2)
  ret <vscale x 2 x i8> %res
}

; Verify splitvec type legalisation works as expected.
define <vscale x 8 x i32> @splice_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
; CHECK-LABEL: splice_nxv8i32:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov x9, #-8
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z3.s }, p0, [x8, #3, mul vl]
; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl]
; CHECK-NEXT: addvl x8, x8, #2
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
; CHECK-NEXT: sub x8, x8, #32
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x8, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -8)
  ret <vscale x 8 x i32> %res
}

; Verify splitvec type legalisation works as expected.
define <vscale x 16 x float> @splice_nxv16f32_clamped(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
; CHECK-LABEL: splice_nxv16f32_clamped:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-8
; CHECK-NEXT: rdvl x9, #4
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #68
; CHECK-NEXT: cmp x9, #68
; CHECK-NEXT: st1w { z3.s }, p0, [x8, #3, mul vl]
; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z7.s }, p0, [x8, #7, mul vl]
; CHECK-NEXT: st1w { z4.s }, p0, [x8, #4, mul vl]
; CHECK-NEXT: st1w { z5.s }, p0, [x8, #5, mul vl]
; CHECK-NEXT: st1w { z6.s }, p0, [x8, #6, mul vl]
; CHECK-NEXT: addvl x8, x8, #4
; CHECK-NEXT: csel x9, x9, x10, lo
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8]
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x8, #1, mul vl]
; CHECK-NEXT: ld1w { z2.s }, p0/z, [x8, #2, mul vl]
; CHECK-NEXT: ld1w { z3.s }, p0/z, [x8, #3, mul vl]
; CHECK-NEXT: addvl sp, sp, #8
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
  %res = call <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -17)
  ret <vscale x 16 x float> %res
}

declare <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
declare <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, i32)
declare <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32)
declare <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32)
declare <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, i32)
declare <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32)
declare <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, i32)
declare <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, i32)
declare <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, i32)
declare <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i32)
declare <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, i32)
declare <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, i32)
declare <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, i32)

attributes #0 = { nounwind "target-features"="+sve" }