[RISCV] Merge vector tests for rv32 and rv64 into a single test file

These tests have nearly identical content; the only difference is
that the rv64 test has a signext attribute on some parameters.
That attribute should be harmless on rv32.

Merge them into a single test file with 2 RUN lines.

Differential Revision: https://reviews.llvm.org/D112242
This commit is contained in:
Craig Topper 2021-10-21 10:22:02 -07:00
parent ba02586fbe
commit ce7b8343be
40 changed files with 2621 additions and 18237 deletions

View File

@ -1,837 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; i8-element section. A splat of a scalar register operand is expected to
; select the vadd.vx form; constant splats of -1 and 2 fold to the vadd.vi
; immediate form. LMUL scales from mf8 (nxv1i8) up to m8 (nxv64i8).

; Non-constant splat: %b arrives in a0 and feeds vadd.vx directly.
define <vscale x 1 x i8> @vadd_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
; Constant splat of -1 is folded into the vadd.vi immediate.
define <vscale x 1 x i8> @vadd_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -1, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
; Constant splat of 2 is folded into the vadd.vi immediate.
define <vscale x 1 x i8> @vadd_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv1i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 2, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
; Test constant adds to see if we can optimize them away for scalable vectors.
; splat(2) + splat(3) is constant-folded to a single vmv.v.i of 5.
define <vscale x 1 x i8> @vadd_ii_nxv1i8_1() {
; CHECK-LABEL: vadd_ii_nxv1i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.i v8, 5
; CHECK-NEXT: ret
%heada = insertelement <vscale x 1 x i8> undef, i8 2, i32 0
%splata = shufflevector <vscale x 1 x i8> %heada, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%headb = insertelement <vscale x 1 x i8> undef, i8 3, i32 0
%splatb = shufflevector <vscale x 1 x i8> %headb, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i8> %splata, %splatb
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vadd_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vadd_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -1, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vadd_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv2i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 2, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vadd_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vadd_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -1, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vadd_vx_nxv4i8_1(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv4i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 2, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vadd_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vadd_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -1, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vadd_vx_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv8i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 2, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vadd_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vadd_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -1, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vadd_vx_nxv16i8_1(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv16i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 2, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vadd_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = add <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vadd_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -1, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = add <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vadd_vx_nxv32i8_1(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv32i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 2, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = add <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vadd_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = add <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vadd_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -1, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = add <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vadd_vx_nxv64i8_1(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv64i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 2, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = add <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
; i16-element section: same three patterns as the i8 section (vadd.vx for a
; register splat, vadd.vi for -1 and 2), with LMUL from mf4 up to m8.
define <vscale x 1 x i16> @vadd_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vadd_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -1, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vadd_vx_nxv1i16_1(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv1i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 2, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vadd_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vadd_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -1, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vadd_vx_nxv2i16_1(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv2i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 2, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vadd_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vadd_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -1, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vadd_vx_nxv4i16_1(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv4i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 2, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vadd_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vadd_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -1, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vadd_vx_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv8i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 2, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vadd_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vadd_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -1, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vadd_vx_nxv16i16_1(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv16i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 2, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vadd_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = add <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vadd_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -1, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = add <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vadd_vx_nxv32i16_1(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv32i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 2, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = add <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
; i32-element section: same patterns, LMUL mf2 up to m8. Note: no signext on
; the scalar here (i32 needs no extension attribute on rv32; on rv64 the
; merged test adds it — see the commit message).
define <vscale x 1 x i32> @vadd_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vadd_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vadd_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -1, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vadd_vx_nxv1i32_1(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv1i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 2, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vadd_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vadd_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vadd_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -1, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vadd_vx_nxv2i32_1(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv2i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 2, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vadd_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vadd_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vadd_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -1, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vadd_vx_nxv4i32_1(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv4i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 2, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vadd_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vadd_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vadd_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -1, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vadd_vx_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv8i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 2, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vadd_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vadd_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vadd_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -1, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vadd_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv16i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 2, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = add <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
; i64-element section. This file runs with -mtriple=riscv32 (see the RUN line
; at the top), so a non-constant i64 scalar arrives split in a0/a1: codegen
; stores both halves to the stack and splats them with a zero-stride vlse64.v
; feeding vadd.vv. Constant -1 and 2 splats still fold to vadd.vi.
define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vadd_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -1, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vadd_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv1i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 2, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vadd_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -1, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vadd_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv2i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 2, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vadd_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -1, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vadd_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv4i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vadd_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vadd_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vadd_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv1i8:
@ -665,11 +666,24 @@ define <vscale x 16 x i32> @vadd_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
}
define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vadd_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vadd.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = add <vscale x 1 x i64> %va, %splat
@ -701,11 +715,24 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
}
define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vadd_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vadd.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = add <vscale x 2 x i64> %va, %splat
@ -737,11 +764,24 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
}
define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vadd_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vadd.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = add <vscale x 4 x i64> %va, %splat
@ -773,11 +813,24 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
}
define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vadd_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vadd_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vadd.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = add <vscale x 8 x i64> %va, %splat

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vand_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vand_vv_nxv1i8:
@ -1074,11 +1075,24 @@ define <vscale x 1 x i64> @vand_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vand_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vand_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vand_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vand.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = and <vscale x 1 x i64> %va, %splat
@ -1133,11 +1147,24 @@ define <vscale x 2 x i64> @vand_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vand_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vand_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vand_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vand.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = and <vscale x 2 x i64> %va, %splat
@ -1192,11 +1219,24 @@ define <vscale x 4 x i64> @vand_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vand_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vand_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vand_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vand.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = and <vscale x 4 x i64> %va, %splat
@ -1251,11 +1291,24 @@ define <vscale x 8 x i64> @vand_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vand_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vand_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vand_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vand.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = and <vscale x 8 x i64> %va, %splat

View File

@ -1,894 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vdiv_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 1 x i8> %va, %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vdiv_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v9, v8, 7
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vdiv_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 2 x i8> %va, %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vdiv_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vdiv_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v9, v8, 7
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vdiv_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 4 x i8> %va, %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vdiv_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vdiv_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v9, v8, 7
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vdiv_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 8 x i8> %va, %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vdiv_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vdiv_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v9, v8, 7
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vdiv_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = sdiv <vscale x 16 x i8> %va, %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vdiv_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = sdiv <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vdiv_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: vsub.vv v8, v10, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v10, v8, 7
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = sdiv <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vdiv_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = sdiv <vscale x 32 x i8> %va, %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vdiv_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = sdiv <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vdiv_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: vsub.vv v8, v12, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v12, v8, 7
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = sdiv <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vdiv_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = sdiv <vscale x 64 x i8> %va, %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vdiv_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = sdiv <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vdiv_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: vsub.vv v8, v16, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v16, v8, 7
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = sdiv <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vdiv_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 1 x i16> %va, %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vdiv_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v9, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vdiv_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 2 x i16> %va, %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vdiv_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v9, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vdiv_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 4 x i16> %va, %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vdiv_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v9, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vdiv_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = sdiv <vscale x 8 x i16> %va, %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vdiv_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v10, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vdiv_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = sdiv <vscale x 16 x i16> %va, %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vdiv_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = sdiv <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v12, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = sdiv <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vdiv_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = sdiv <vscale x 32 x i16> %va, %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vdiv_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = sdiv <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v16, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = sdiv <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vdiv_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 1 x i32> %va, %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vdiv_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vdiv_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v9, v8, 31
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vdiv_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 2 x i32> %va, %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vdiv_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vdiv_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v9, v8, 31
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vdiv_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = sdiv <vscale x 4 x i32> %va, %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vdiv_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vdiv_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: vsub.vv v8, v10, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v10, v8, 31
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vdiv_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = sdiv <vscale x 8 x i32> %va, %vb
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vdiv_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vdiv_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: vsub.vv v8, v12, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v12, v8, 31
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vdiv_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = sdiv <vscale x 16 x i32> %va, %vb
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vdiv_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = sdiv <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vdiv_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: vsub.vv v8, v16, v8
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vsrl.vi v16, v8, 31
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = sdiv <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vdiv_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sdiv <vscale x 1 x i64> %va, %vb
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vdiv_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vdiv_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1029851
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vdiv_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vdiv_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = sdiv <vscale x 2 x i64> %va, %vb
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vdiv_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vdiv_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1029851
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v10, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vdiv_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vdiv_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = sdiv <vscale x 4 x i64> %va, %vb
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vdiv_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vdiv_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1029851
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v12, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vdiv_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vdiv_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vdiv.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = sdiv <vscale x 8 x i64> %va, %vb
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vdiv_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vdiv_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vdiv.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1029851
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v16, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vdiv_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i8:
@ -321,16 +322,27 @@ define <vscale x 1 x i16> @vdiv_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
}
define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v9, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv1i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; RV32-NEXT: vmulh.vx v8, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vsrl.vi v9, v8, 15
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv1i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vsrl.vi v9, v8, 15
; RV64-NEXT: vadd.vv v8, v8, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i16> %va, %splat
@ -360,16 +372,27 @@ define <vscale x 2 x i16> @vdiv_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
}
define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v9, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv2i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; RV32-NEXT: vmulh.vx v8, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vsrl.vi v9, v8, 15
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv2i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vsrl.vi v9, v8, 15
; RV64-NEXT: vadd.vv v8, v8, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i16> %va, %splat
@ -399,16 +422,27 @@ define <vscale x 4 x i16> @vdiv_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
}
define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v9, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv4i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; RV32-NEXT: vmulh.vx v8, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vsrl.vi v9, v8, 15
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv4i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vsrl.vi v9, v8, 15
; RV64-NEXT: vadd.vv v8, v8, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i16> %va, %splat
@ -438,16 +472,27 @@ define <vscale x 8 x i16> @vdiv_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
}
define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v10, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv8i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT: vmulh.vx v8, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vsrl.vi v10, v8, 15
; RV32-NEXT: vadd.vv v8, v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv8i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vsrl.vi v10, v8, 15
; RV64-NEXT: vadd.vv v8, v8, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i16> %va, %splat
@ -477,16 +522,27 @@ define <vscale x 16 x i16> @vdiv_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
}
define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v12, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv16i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; RV32-NEXT: vmulh.vx v8, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vsrl.vi v12, v8, 15
; RV32-NEXT: vadd.vv v8, v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv16i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vsrl.vi v12, v8, 15
; RV64-NEXT: vadd.vv v8, v8, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = sdiv <vscale x 16 x i16> %va, %splat
@ -516,16 +572,27 @@ define <vscale x 32 x i16> @vdiv_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
}
define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vsrl.vi v16, v8, 15
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv32i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; RV32-NEXT: vmulh.vx v8, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vsrl.vi v16, v8, 15
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv32i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vsrl.vi v16, v8, 15
; RV64-NEXT: vadd.vv v8, v8, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = sdiv <vscale x 32 x i16> %va, %splat
@ -542,7 +609,7 @@ define <vscale x 1 x i32> @vdiv_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vdiv_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
define <vscale x 1 x i32> @vdiv_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
@ -555,17 +622,29 @@ define <vscale x 1 x i32> @vdiv_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
}
define <vscale x 1 x i32> @vdiv_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsrl.vi v9, v8, 31
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv1i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; RV32-NEXT: vmulh.vx v9, v8, a0
; RV32-NEXT: vsub.vv v8, v9, v8
; RV32-NEXT: vsrl.vi v9, v8, 31
; RV32-NEXT: vsra.vi v8, v8, 2
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv1i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; RV64-NEXT: vmulh.vx v9, v8, a0
; RV64-NEXT: vsub.vv v8, v9, v8
; RV64-NEXT: vsra.vi v8, v8, 2
; RV64-NEXT: vsrl.vi v9, v8, 31
; RV64-NEXT: vadd.vv v8, v8, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i32> %va, %splat
@ -582,7 +661,7 @@ define <vscale x 2 x i32> @vdiv_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vdiv_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
define <vscale x 2 x i32> @vdiv_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
@ -595,17 +674,29 @@ define <vscale x 2 x i32> @vdiv_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
}
define <vscale x 2 x i32> @vdiv_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsrl.vi v9, v8, 31
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv2i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; RV32-NEXT: vmulh.vx v9, v8, a0
; RV32-NEXT: vsub.vv v8, v9, v8
; RV32-NEXT: vsrl.vi v9, v8, 31
; RV32-NEXT: vsra.vi v8, v8, 2
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv2i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; RV64-NEXT: vmulh.vx v9, v8, a0
; RV64-NEXT: vsub.vv v8, v9, v8
; RV64-NEXT: vsra.vi v8, v8, 2
; RV64-NEXT: vsrl.vi v9, v8, 31
; RV64-NEXT: vadd.vv v8, v8, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i32> %va, %splat
@ -622,7 +713,7 @@ define <vscale x 4 x i32> @vdiv_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vdiv_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
define <vscale x 4 x i32> @vdiv_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
@ -635,17 +726,29 @@ define <vscale x 4 x i32> @vdiv_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
}
define <vscale x 4 x i32> @vdiv_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: vsub.vv v8, v10, v8
; CHECK-NEXT: vsrl.vi v10, v8, 31
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv4i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; RV32-NEXT: vmulh.vx v10, v8, a0
; RV32-NEXT: vsub.vv v8, v10, v8
; RV32-NEXT: vsrl.vi v10, v8, 31
; RV32-NEXT: vsra.vi v8, v8, 2
; RV32-NEXT: vadd.vv v8, v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv4i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; RV64-NEXT: vmulh.vx v10, v8, a0
; RV64-NEXT: vsub.vv v8, v10, v8
; RV64-NEXT: vsra.vi v8, v8, 2
; RV64-NEXT: vsrl.vi v10, v8, 31
; RV64-NEXT: vadd.vv v8, v8, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i32> %va, %splat
@ -662,7 +765,7 @@ define <vscale x 8 x i32> @vdiv_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vdiv_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
define <vscale x 8 x i32> @vdiv_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
@ -675,17 +778,29 @@ define <vscale x 8 x i32> @vdiv_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
}
define <vscale x 8 x i32> @vdiv_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: vsub.vv v8, v12, v8
; CHECK-NEXT: vsrl.vi v12, v8, 31
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv8i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vmulh.vx v12, v8, a0
; RV32-NEXT: vsub.vv v8, v12, v8
; RV32-NEXT: vsrl.vi v12, v8, 31
; RV32-NEXT: vsra.vi v8, v8, 2
; RV32-NEXT: vadd.vv v8, v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv8i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV64-NEXT: vmulh.vx v12, v8, a0
; RV64-NEXT: vsub.vv v8, v12, v8
; RV64-NEXT: vsra.vi v8, v8, 2
; RV64-NEXT: vsrl.vi v12, v8, 31
; RV64-NEXT: vadd.vv v8, v8, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i32> %va, %splat
@ -702,7 +817,7 @@ define <vscale x 16 x i32> @vdiv_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vdiv_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
define <vscale x 16 x i32> @vdiv_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
@ -715,17 +830,29 @@ define <vscale x 16 x i32> @vdiv_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
}
define <vscale x 16 x i32> @vdiv_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vdiv_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: vsub.vv v8, v16, v8
; CHECK-NEXT: vsrl.vi v16, v8, 31
; CHECK-NEXT: vsra.vi v8, v8, 2
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv16i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV32-NEXT: vmulh.vx v16, v8, a0
; RV32-NEXT: vsub.vv v8, v16, v8
; RV32-NEXT: vsrl.vi v16, v8, 31
; RV32-NEXT: vsra.vi v8, v8, 2
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv16i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV64-NEXT: vmulh.vx v16, v8, a0
; RV64-NEXT: vsub.vv v8, v16, v8
; RV64-NEXT: vsra.vi v8, v8, 2
; RV64-NEXT: vsrl.vi v16, v8, 31
; RV64-NEXT: vadd.vv v8, v8, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = sdiv <vscale x 16 x i32> %va, %splat
@ -743,18 +870,24 @@ define <vscale x 1 x i64> @vdiv_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vdiv_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vdiv_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vdiv.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i64> %va, %splat
@ -762,26 +895,44 @@ define <vscale x 1 x i64> @vdiv_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
}
define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 748983
; CHECK-NEXT: addi a0, a0, -586
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: lui a0, 898779
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vmulh.vv v8, v8, v9
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv1i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 748983
; RV32-NEXT: addi a0, a0, -586
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: lui a0, 898779
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v9
; RV32-NEXT: addi a0, zero, 63
; RV32-NEXT: vsrl.vx v9, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv1i64_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1029851
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: addi a0, zero, 63
; RV64-NEXT: vsrl.vx v9, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vadd.vv v8, v8, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sdiv <vscale x 1 x i64> %va, %splat
@ -799,18 +950,24 @@ define <vscale x 2 x i64> @vdiv_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vdiv_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vdiv_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vdiv.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vdiv.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i64> %va, %splat
@ -818,26 +975,44 @@ define <vscale x 2 x i64> @vdiv_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
}
define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 748983
; CHECK-NEXT: addi a0, a0, -586
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: lui a0, 898779
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vmulh.vv v8, v8, v10
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v10, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv2i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 748983
; RV32-NEXT: addi a0, a0, -586
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: lui a0, 898779
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v10
; RV32-NEXT: addi a0, zero, 63
; RV32-NEXT: vsrl.vx v10, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vadd.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv2i64_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1029851
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: addi a0, zero, 63
; RV64-NEXT: vsrl.vx v10, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vadd.vv v8, v8, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sdiv <vscale x 2 x i64> %va, %splat
@ -855,18 +1030,24 @@ define <vscale x 4 x i64> @vdiv_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vdiv_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vdiv_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vdiv.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vdiv.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i64> %va, %splat
@ -874,26 +1055,44 @@ define <vscale x 4 x i64> @vdiv_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
}
define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 748983
; CHECK-NEXT: addi a0, a0, -586
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: lui a0, 898779
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vmulh.vv v8, v8, v12
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v12, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv4i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 748983
; RV32-NEXT: addi a0, a0, -586
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: lui a0, 898779
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v12
; RV32-NEXT: addi a0, zero, 63
; RV32-NEXT: vsrl.vx v12, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vadd.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv4i64_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1029851
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: addi a0, zero, 63
; RV64-NEXT: vsrl.vx v12, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vadd.vv v8, v8, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sdiv <vscale x 4 x i64> %va, %splat
@ -911,18 +1110,24 @@ define <vscale x 8 x i64> @vdiv_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vdiv_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vdiv_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vdiv.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vdiv.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i64> %va, %splat
@ -930,26 +1135,44 @@ define <vscale x 8 x i64> @vdiv_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
}
define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 748983
; CHECK-NEXT: addi a0, a0, -586
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: lui a0, 898779
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vmulh.vv v8, v8, v16
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v16, v8, a0
; CHECK-NEXT: vsra.vi v8, v8, 1
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdiv_vi_nxv8i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 748983
; RV32-NEXT: addi a0, a0, -586
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: lui a0, 898779
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v16
; RV32-NEXT: addi a0, zero, 63
; RV32-NEXT: vsrl.vx v16, v8, a0
; RV32-NEXT: vsra.vi v8, v8, 1
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vi_nxv8i64_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1029851
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: addi a0, zero, 63
; RV64-NEXT: vsrl.vx v16, v8, a0
; RV64-NEXT: vsra.vi v8, v8, 1
; RV64-NEXT: vadd.vv v8, v8, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sdiv <vscale x 8 x i64> %va, %splat

View File

@ -1,925 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vdivu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vdivu_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 1 x i8> %va, %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vdivu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vdivu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vdivu_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 33
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 5
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vdivu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vdivu_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 2 x i8> %va, %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vdivu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vdivu_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vdivu_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 33
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 5
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vdivu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vdivu_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 4 x i8> %va, %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vdivu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vdivu_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vdivu_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 33
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 5
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vdivu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vdivu_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 8 x i8> %va, %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vdivu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = udiv <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vdivu_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vdivu_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 33
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 5
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = udiv <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vdivu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vdivu_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = udiv <vscale x 16 x i8> %va, %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vdivu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = udiv <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vdivu_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vdivu_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 33
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 5
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = udiv <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vdivu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vdivu_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = udiv <vscale x 32 x i8> %va, %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vdivu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = udiv <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vdivu_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vdivu_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 33
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 5
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = udiv <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vdivu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vdivu_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = udiv <vscale x 64 x i8> %va, %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vdivu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = udiv <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vdivu_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vdivu_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 33
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 5
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = udiv <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vdivu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vdivu_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 1 x i16> %va, %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vdivu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vdivu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vdivu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vdivu_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 2 x i16> %va, %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vdivu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vdivu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vdivu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vdivu_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 4 x i16> %va, %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vdivu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vdivu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vdivu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vdivu_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = udiv <vscale x 8 x i16> %va, %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vdivu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = udiv <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vdivu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = udiv <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vdivu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vdivu_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = udiv <vscale x 16 x i16> %va, %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vdivu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = udiv <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vdivu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = udiv <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vdivu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vdivu_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = udiv <vscale x 32 x i16> %va, %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vdivu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = udiv <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vdivu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = udiv <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vdivu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vdivu_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 1 x i32> %va, %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vdivu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vdivu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vdivu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vdivu_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = udiv <vscale x 2 x i32> %va, %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vdivu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vdivu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vdivu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vdivu_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = udiv <vscale x 4 x i32> %va, %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vdivu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
; NOTE(review): the CHECK lines in this section use `addiw`, an RV64-only
; instruction, so these expectations belong to a riscv64 RUN line; the RUN
; line itself is outside this excerpt -- confirm at the top of the file.
; udiv by the constant splat -7 is strength-reduced: multiply by the magic
; constant 2^29+1 (lui a0, 131072 loads 2^29; addiw adds 1) with vmulhu.vx,
; then logical shift right by 29.
define <vscale x 4 x i32> @vdivu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
  %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = udiv <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}
; Vector-vector unsigned divide lowers directly to vdivu.vv (here e32, LMUL=m4).
define <vscale x 8 x i32> @vdivu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vdivu_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v12
; CHECK-NEXT: ret
  %vc = udiv <vscale x 8 x i32> %va, %vb
  ret <vscale x 8 x i32> %vc
}
; A splat of a variable scalar folds into the vector-scalar form vdivu.vx.
define <vscale x 8 x i32> @vdivu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = udiv <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}
; Same divide-by--7 strength reduction as above, at LMUL=m4.
define <vscale x 8 x i32> @vdivu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = udiv <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}
; vdivu.vv at the largest i32 type tested (LMUL=m8).
define <vscale x 16 x i32> @vdivu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vdivu_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v16
; CHECK-NEXT: ret
  %vc = udiv <vscale x 16 x i32> %va, %vb
  ret <vscale x 16 x i32> %vc
}
; vdivu.vx at LMUL=m8.
define <vscale x 16 x i32> @vdivu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %vc = udiv <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}
; Divide-by--7 strength reduction at LMUL=m8.
define <vscale x 16 x i32> @vdivu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
  %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %vc = udiv <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}
; i64-element tests at LMUL=m1. Vector-vector udiv lowers to vdivu.vv.
define <vscale x 1 x i64> @vdivu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vdivu_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: ret
  %vc = udiv <vscale x 1 x i64> %va, %vb
  ret <vscale x 1 x i64> %vc
}
; Variable i64 splat folds to vdivu.vx. NOTE(review): the scalar arrives in a
; single GPR (a0) here, which only works when XLEN=64; on RV32 an i64 splat
; needs a stack-based vlse64 sequence -- confirm which RUN lines apply.
define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vdivu_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = udiv <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}
; udiv by splat(-7): multiply by the magic constant 2^61+1 (materialized with
; addi/slli/addi) via vmulhu.vx, then logical shift right by 61 via vsrl.vx.
define <vscale x 1 x i64> @vdivu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: slli a0, a0, 61
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = udiv <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}
; udiv by splat(2) becomes a single logical shift right by 1.
define <vscale x 1 x i64> @vdivu_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv1i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 1
; CHECK-NEXT: ret
  %head = insertelement <vscale x 1 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = udiv <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}
; fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) if c is power of 2
; Here c = 16 (log2 = 4), so the shift amount is %vb + 4 (vadd.vi then vsrl.vv).
define <vscale x 1 x i64> @vdivu_vi_nxv1i64_2(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vdivu_vi_nxv1i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vadd.vi v9, v9, 4
; CHECK-NEXT: vsrl.vv v8, v8, v9
; CHECK-NEXT: ret
  %head = insertelement <vscale x 1 x i64> undef, i64 16, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = shl <vscale x 1 x i64> %splat, %vb
  %vd = udiv <vscale x 1 x i64> %va, %vc
  ret <vscale x 1 x i64> %vd
}
; Same i64 test pattern as the m1 group, repeated at LMUL=m2 (operand v10).
define <vscale x 2 x i64> @vdivu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vdivu_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v10
; CHECK-NEXT: ret
  %vc = udiv <vscale x 2 x i64> %va, %vb
  ret <vscale x 2 x i64> %vc
}
; Variable i64 splat folds to vdivu.vx (single-GPR scalar; see nxv1i64 note).
define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vdivu_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = udiv <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}
; udiv by splat(-7): vmulhu.vx with magic constant 2^61+1, then vsrl.vx by 61.
define <vscale x 2 x i64> @vdivu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: slli a0, a0, 61
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = udiv <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}
; udiv by splat(2) becomes a single logical shift right by 1.
define <vscale x 2 x i64> @vdivu_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv2i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 1
; CHECK-NEXT: ret
  %head = insertelement <vscale x 2 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = udiv <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}
; fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) if c is power of 2
; c = 16, so shift amount is %vb + 4.
define <vscale x 2 x i64> @vdivu_vi_nxv2i64_2(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vdivu_vi_nxv2i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vadd.vi v10, v10, 4
; CHECK-NEXT: vsrl.vv v8, v8, v10
; CHECK-NEXT: ret
  %head = insertelement <vscale x 2 x i64> undef, i64 16, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = shl <vscale x 2 x i64> %splat, %vb
  %vd = udiv <vscale x 2 x i64> %va, %vc
  ret <vscale x 2 x i64> %vd
}
; Same i64 test pattern, repeated at LMUL=m4 (operand v12).
define <vscale x 4 x i64> @vdivu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vdivu_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v12
; CHECK-NEXT: ret
  %vc = udiv <vscale x 4 x i64> %va, %vb
  ret <vscale x 4 x i64> %vc
}
; Variable i64 splat folds to vdivu.vx (single-GPR scalar; see nxv1i64 note).
define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vdivu_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = udiv <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}
; udiv by splat(-7): vmulhu.vx with magic constant 2^61+1, then vsrl.vx by 61.
define <vscale x 4 x i64> @vdivu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: slli a0, a0, 61
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = udiv <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}
; udiv by splat(2) becomes a single logical shift right by 1.
define <vscale x 4 x i64> @vdivu_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv4i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 1
; CHECK-NEXT: ret
  %head = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = udiv <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}
; fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) if c is power of 2
; c = 16, so shift amount is %vb + 4.
define <vscale x 4 x i64> @vdivu_vi_nxv4i64_2(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vdivu_vi_nxv4i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vadd.vi v12, v12, 4
; CHECK-NEXT: vsrl.vv v8, v8, v12
; CHECK-NEXT: ret
  %head = insertelement <vscale x 4 x i64> undef, i64 16, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = shl <vscale x 4 x i64> %splat, %vb
  %vd = udiv <vscale x 4 x i64> %va, %vc
  ret <vscale x 4 x i64> %vd
}
; Same i64 test pattern, repeated at the largest LMUL tested, m8 (operand v16).
define <vscale x 8 x i64> @vdivu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vdivu_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vdivu.vv v8, v8, v16
; CHECK-NEXT: ret
  %vc = udiv <vscale x 8 x i64> %va, %vb
  ret <vscale x 8 x i64> %vc
}
; Variable i64 splat folds to vdivu.vx (single-GPR scalar; see nxv1i64 note).
define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vdivu_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vdivu.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = udiv <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}
; udiv by splat(-7): vmulhu.vx with magic constant 2^61+1, then vsrl.vx by 61.
define <vscale x 8 x i64> @vdivu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: slli a0, a0, 61
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = udiv <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}
; udiv by splat(2) becomes a single logical shift right by 1.
define <vscale x 8 x i64> @vdivu_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 1
; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = udiv <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}
; fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) if c is power of 2
; c = 16, so shift amount is %vb + 4.
define <vscale x 8 x i64> @vdivu_vi_nxv8i64_2(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vdivu_vi_nxv8i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vadd.vi v16, v16, 4
; CHECK-NEXT: vsrl.vv v8, v8, v16
; CHECK-NEXT: ret
  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = shl <vscale x 8 x i64> %splat, %vb
  %vd = udiv <vscale x 8 x i64> %va, %vc
  ret <vscale x 8 x i64> %vd
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vdivu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vdivu_vv_nxv1i8:
@ -300,14 +301,23 @@ define <vscale x 1 x i16> @vdivu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext
}
define <vscale x 1 x i16> @vdivu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv1i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 13
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv1i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 13
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i16> %va, %splat
@ -337,14 +347,23 @@ define <vscale x 2 x i16> @vdivu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext
}
define <vscale x 2 x i16> @vdivu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv2i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 13
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv2i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 13
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i16> %va, %splat
@ -374,14 +393,23 @@ define <vscale x 4 x i16> @vdivu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext
}
define <vscale x 4 x i16> @vdivu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv4i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 13
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv4i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 13
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i16> %va, %splat
@ -411,14 +439,23 @@ define <vscale x 8 x i16> @vdivu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext
}
define <vscale x 8 x i16> @vdivu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv8i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 13
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv8i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 13
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = udiv <vscale x 8 x i16> %va, %splat
@ -448,14 +485,23 @@ define <vscale x 16 x i16> @vdivu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signe
}
define <vscale x 16 x i16> @vdivu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv16i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 13
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv16i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 13
; RV64-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = udiv <vscale x 16 x i16> %va, %splat
@ -485,14 +531,23 @@ define <vscale x 32 x i16> @vdivu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signe
}
define <vscale x 32 x i16> @vdivu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vdivu_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 13
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv32i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 13
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv32i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 13
; RV64-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = udiv <vscale x 32 x i16> %va, %splat
@ -509,7 +564,7 @@ define <vscale x 1 x i32> @vdivu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vdivu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
define <vscale x 1 x i32> @vdivu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
@ -522,14 +577,23 @@ define <vscale x 1 x i32> @vdivu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
}
define <vscale x 1 x i32> @vdivu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv1i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 29
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv1i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 29
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i32> %va, %splat
@ -546,7 +610,7 @@ define <vscale x 2 x i32> @vdivu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vdivu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
define <vscale x 2 x i32> @vdivu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
@ -559,14 +623,23 @@ define <vscale x 2 x i32> @vdivu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
}
define <vscale x 2 x i32> @vdivu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv2i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 29
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv2i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 29
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i32> %va, %splat
@ -583,7 +656,7 @@ define <vscale x 4 x i32> @vdivu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vdivu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
define <vscale x 4 x i32> @vdivu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
@ -596,14 +669,23 @@ define <vscale x 4 x i32> @vdivu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
}
define <vscale x 4 x i32> @vdivu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv4i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 29
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv4i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 29
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i32> %va, %splat
@ -620,7 +702,7 @@ define <vscale x 8 x i32> @vdivu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vdivu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
define <vscale x 8 x i32> @vdivu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
@ -633,14 +715,23 @@ define <vscale x 8 x i32> @vdivu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
}
define <vscale x 8 x i32> @vdivu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv8i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 29
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv8i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 29
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = udiv <vscale x 8 x i32> %va, %splat
@ -657,7 +748,7 @@ define <vscale x 16 x i32> @vdivu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vdivu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
define <vscale x 16 x i32> @vdivu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdivu_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
@ -670,14 +761,23 @@ define <vscale x 16 x i32> @vdivu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
}
define <vscale x 16 x i32> @vdivu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vdivu_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v8, v8, a0
; CHECK-NEXT: vsrl.vi v8, v8, 29
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv16i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV32-NEXT: vmulhu.vx v8, v8, a0
; RV32-NEXT: vsrl.vi v8, v8, 29
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv16i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 29
; RV64-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = udiv <vscale x 16 x i32> %va, %splat
@ -695,18 +795,24 @@ define <vscale x 1 x i64> @vdivu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
}
define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vdivu_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vdivu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i64> %va, %splat
@ -714,22 +820,33 @@ define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
}
define <vscale x 1 x i64> @vdivu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vmulhu.vv v8, v8, v9
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv1i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmulhu.vv v8, v8, v9
; RV32-NEXT: addi a0, zero, 61
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv1i64_0:
; RV64: # %bb.0:
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: slli a0, a0, 61
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: addi a0, zero, 61
; RV64-NEXT: vsrl.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = udiv <vscale x 1 x i64> %va, %splat
@ -774,18 +891,24 @@ define <vscale x 2 x i64> @vdivu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
}
define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vdivu_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vdivu.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vdivu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i64> %va, %splat
@ -793,22 +916,33 @@ define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
}
define <vscale x 2 x i64> @vdivu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vmulhu.vv v8, v8, v10
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv2i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmulhu.vv v8, v8, v10
; RV32-NEXT: addi a0, zero, 61
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv2i64_0:
; RV64: # %bb.0:
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: slli a0, a0, 61
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: addi a0, zero, 61
; RV64-NEXT: vsrl.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = udiv <vscale x 2 x i64> %va, %splat
@ -853,18 +987,24 @@ define <vscale x 4 x i64> @vdivu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
}
define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vdivu_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vdivu.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vdivu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i64> %va, %splat
@ -872,22 +1012,33 @@ define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
}
define <vscale x 4 x i64> @vdivu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vmulhu.vv v8, v8, v12
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv4i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmulhu.vv v8, v8, v12
; RV32-NEXT: addi a0, zero, 61
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv4i64_0:
; RV64: # %bb.0:
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: slli a0, a0, 61
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: addi a0, zero, 61
; RV64-NEXT: vsrl.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = udiv <vscale x 4 x i64> %va, %splat
@ -932,18 +1083,24 @@ define <vscale x 8 x i64> @vdivu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
}
define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vdivu_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vdivu.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vdivu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = udiv <vscale x 8 x i64> %va, %splat
@ -951,22 +1108,33 @@ define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
}
define <vscale x 8 x i64> @vdivu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vdivu_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vmulhu.vv v8, v8, v16
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
; RV32-LABEL: vdivu_vi_nxv8i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmulhu.vv v8, v8, v16
; RV32-NEXT: addi a0, zero, 61
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vi_nxv8i64_0:
; RV64: # %bb.0:
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: slli a0, a0, 61
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vmulhu.vx v8, v8, a0
; RV64-NEXT: addi a0, zero, 61
; RV64-NEXT: vsrl.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = udiv <vscale x 8 x i64> %va, %splat

View File

@ -1,619 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i16> @vsext_nxv1i8_nxv1i16(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsext_nxv1i8_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %evec
}
define <vscale x 1 x i16> @vzext_nxv1i8_nxv1i16(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vzext_nxv1i8_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vzext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %evec
}
define <vscale x 1 x i32> @vsext_nxv1i8_nxv1i32(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsext_nxv1i8_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsext.vf4 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i32>
ret <vscale x 1 x i32> %evec
}
define <vscale x 1 x i32> @vzext_nxv1i8_nxv1i32(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vzext_nxv1i8_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vzext.vf4 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
ret <vscale x 1 x i32> %evec
}
define <vscale x 1 x i64> @vsext_nxv1i8_nxv1i64(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsext_nxv1i8_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsext.vf8 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
ret <vscale x 1 x i64> %evec
}
define <vscale x 1 x i64> @vzext_nxv1i8_nxv1i64(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vzext_nxv1i8_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vzext.vf8 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
ret <vscale x 1 x i64> %evec
}
define <vscale x 2 x i16> @vsext_nxv2i8_nxv2i16(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsext_nxv2i8_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %evec
}
define <vscale x 2 x i16> @vzext_nxv2i8_nxv2i16(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vzext_nxv2i8_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vzext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %evec
}
define <vscale x 2 x i32> @vsext_nxv2i8_nxv2i32(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsext_nxv2i8_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsext.vf4 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i32>
ret <vscale x 2 x i32> %evec
}
define <vscale x 2 x i32> @vzext_nxv2i8_nxv2i32(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vzext_nxv2i8_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vzext.vf4 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i32>
ret <vscale x 2 x i32> %evec
}
define <vscale x 2 x i64> @vsext_nxv2i8_nxv2i64(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsext_nxv2i8_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsext.vf8 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
ret <vscale x 2 x i64> %evec
}
define <vscale x 2 x i64> @vzext_nxv2i8_nxv2i64(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vzext_nxv2i8_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vzext.vf8 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
ret <vscale x 2 x i64> %evec
}
define <vscale x 4 x i16> @vsext_nxv4i8_nxv4i16(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsext_nxv4i8_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %evec
}
define <vscale x 4 x i16> @vzext_nxv4i8_nxv4i16(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vzext_nxv4i8_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vzext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %evec
}
define <vscale x 4 x i32> @vsext_nxv4i8_nxv4i32(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsext_nxv4i8_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsext.vf4 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i32>
ret <vscale x 4 x i32> %evec
}
define <vscale x 4 x i32> @vzext_nxv4i8_nxv4i32(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vzext_nxv4i8_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vzext.vf4 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i32>
ret <vscale x 4 x i32> %evec
}
define <vscale x 4 x i64> @vsext_nxv4i8_nxv4i64(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsext_nxv4i8_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsext.vf8 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
ret <vscale x 4 x i64> %evec
}
define <vscale x 4 x i64> @vzext_nxv4i8_nxv4i64(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vzext_nxv4i8_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vzext.vf8 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
ret <vscale x 4 x i64> %evec
}
define <vscale x 8 x i16> @vsext_nxv8i8_nxv8i16(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsext_nxv8i8_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsext.vf2 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %evec
}
define <vscale x 8 x i16> @vzext_nxv8i8_nxv8i16(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vzext_nxv8i8_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %evec
}
define <vscale x 8 x i32> @vsext_nxv8i8_nxv8i32(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsext_nxv8i8_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsext.vf4 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i32>
ret <vscale x 8 x i32> %evec
}
define <vscale x 8 x i32> @vzext_nxv8i8_nxv8i32(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vzext_nxv8i8_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vzext.vf4 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i32>
ret <vscale x 8 x i32> %evec
}
define <vscale x 8 x i64> @vsext_nxv8i8_nxv8i64(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsext_nxv8i8_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsext.vf8 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
ret <vscale x 8 x i64> %evec
}
define <vscale x 8 x i64> @vzext_nxv8i8_nxv8i64(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vzext_nxv8i8_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vzext.vf8 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
ret <vscale x 8 x i64> %evec
}
define <vscale x 16 x i16> @vsext_nxv16i8_nxv16i16(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vsext_nxv16i8_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsext.vf2 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = sext <vscale x 16 x i8> %va to <vscale x 16 x i16>
ret <vscale x 16 x i16> %evec
}
define <vscale x 16 x i16> @vzext_nxv16i8_nxv16i16(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vzext_nxv16i8_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vzext.vf2 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = zext <vscale x 16 x i8> %va to <vscale x 16 x i16>
ret <vscale x 16 x i16> %evec
}
define <vscale x 16 x i32> @vsext_nxv16i8_nxv16i32(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vsext_nxv16i8_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsext.vf4 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = sext <vscale x 16 x i8> %va to <vscale x 16 x i32>
ret <vscale x 16 x i32> %evec
}
define <vscale x 16 x i32> @vzext_nxv16i8_nxv16i32(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vzext_nxv16i8_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vzext.vf4 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = zext <vscale x 16 x i8> %va to <vscale x 16 x i32>
ret <vscale x 16 x i32> %evec
}
define <vscale x 32 x i16> @vsext_nxv32i8_nxv32i16(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vsext_nxv32i8_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsext.vf2 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = sext <vscale x 32 x i8> %va to <vscale x 32 x i16>
ret <vscale x 32 x i16> %evec
}
define <vscale x 32 x i16> @vzext_nxv32i8_nxv32i16(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vzext_nxv32i8_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vzext.vf2 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = zext <vscale x 32 x i8> %va to <vscale x 32 x i16>
ret <vscale x 32 x i16> %evec
}
define <vscale x 1 x i32> @vsext_nxv1i16_nxv1i32(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vsext_nxv1i16_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
ret <vscale x 1 x i32> %evec
}
define <vscale x 1 x i32> @vzext_nxv1i16_nxv1i32(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vzext_nxv1i16_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vzext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 1 x i16> %va to <vscale x 1 x i32>
ret <vscale x 1 x i32> %evec
}
define <vscale x 1 x i64> @vsext_nxv1i16_nxv1i64(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vsext_nxv1i16_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsext.vf4 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
ret <vscale x 1 x i64> %evec
}
define <vscale x 1 x i64> @vzext_nxv1i16_nxv1i64(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vzext_nxv1i16_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vzext.vf4 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
ret <vscale x 1 x i64> %evec
}
define <vscale x 2 x i32> @vsext_nxv2i16_nxv2i32(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vsext_nxv2i16_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
ret <vscale x 2 x i32> %evec
}
define <vscale x 2 x i32> @vzext_nxv2i16_nxv2i32(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vzext_nxv2i16_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vzext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 2 x i16> %va to <vscale x 2 x i32>
ret <vscale x 2 x i32> %evec
}
define <vscale x 2 x i64> @vsext_nxv2i16_nxv2i64(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vsext_nxv2i16_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsext.vf4 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
ret <vscale x 2 x i64> %evec
}
define <vscale x 2 x i64> @vzext_nxv2i16_nxv2i64(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vzext_nxv2i16_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vzext.vf4 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
ret <vscale x 2 x i64> %evec
}
define <vscale x 4 x i32> @vsext_nxv4i16_nxv4i32(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vsext_nxv4i16_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsext.vf2 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
ret <vscale x 4 x i32> %evec
}
define <vscale x 4 x i32> @vzext_nxv4i16_nxv4i32(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vzext_nxv4i16_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = zext <vscale x 4 x i16> %va to <vscale x 4 x i32>
ret <vscale x 4 x i32> %evec
}
define <vscale x 4 x i64> @vsext_nxv4i16_nxv4i64(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vsext_nxv4i16_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsext.vf4 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
ret <vscale x 4 x i64> %evec
}
define <vscale x 4 x i64> @vzext_nxv4i16_nxv4i64(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vzext_nxv4i16_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vzext.vf4 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
ret <vscale x 4 x i64> %evec
}
define <vscale x 8 x i32> @vsext_nxv8i16_nxv8i32(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vsext_nxv8i16_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsext.vf2 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
ret <vscale x 8 x i32> %evec
}
define <vscale x 8 x i32> @vzext_nxv8i16_nxv8i32(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vzext_nxv8i16_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vzext.vf2 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = zext <vscale x 8 x i16> %va to <vscale x 8 x i32>
ret <vscale x 8 x i32> %evec
}
define <vscale x 8 x i64> @vsext_nxv8i16_nxv8i64(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vsext_nxv8i16_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsext.vf4 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
ret <vscale x 8 x i64> %evec
}
define <vscale x 8 x i64> @vzext_nxv8i16_nxv8i64(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vzext_nxv8i16_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vzext.vf4 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
ret <vscale x 8 x i64> %evec
}
define <vscale x 16 x i32> @vsext_nxv16i16_nxv16i32(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vsext_nxv16i16_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsext.vf2 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
ret <vscale x 16 x i32> %evec
}
define <vscale x 16 x i32> @vzext_nxv16i16_nxv16i32(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vzext_nxv16i16_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vzext.vf2 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = zext <vscale x 16 x i16> %va to <vscale x 16 x i32>
ret <vscale x 16 x i32> %evec
}
define <vscale x 1 x i64> @vsext_nxv1i32_nxv1i64(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vsext_nxv1i32_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
ret <vscale x 1 x i64> %evec
}
define <vscale x 1 x i64> @vzext_nxv1i32_nxv1i64(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vzext_nxv1i32_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vzext.vf2 v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%evec = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
ret <vscale x 1 x i64> %evec
}
define <vscale x 2 x i64> @vsext_nxv2i32_nxv2i64(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vsext_nxv2i32_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsext.vf2 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
ret <vscale x 2 x i64> %evec
}
define <vscale x 2 x i64> @vzext_nxv2i32_nxv2i64(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vzext_nxv2i32_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%evec = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
ret <vscale x 2 x i64> %evec
}
define <vscale x 4 x i64> @vsext_nxv4i32_nxv4i64(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vsext_nxv4i32_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsext.vf2 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
ret <vscale x 4 x i64> %evec
}
define <vscale x 4 x i64> @vzext_nxv4i32_nxv4i64(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vzext_nxv4i32_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vzext.vf2 v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%evec = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
ret <vscale x 4 x i64> %evec
}
define <vscale x 8 x i64> @vsext_nxv8i32_nxv8i64(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vsext_nxv8i32_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsext.vf2 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
ret <vscale x 8 x i64> %evec
}
define <vscale x 8 x i64> @vzext_nxv8i32_nxv8i64(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vzext_nxv8i32_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vzext.vf2 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%evec = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
ret <vscale x 8 x i64> %evec
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i16> @vsext_nxv1i8_nxv1i16(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsext_nxv1i8_nxv1i16:

View File

@ -1,867 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; Signed-max selection tests, i8 element types, LMUL mf8 through m8.
; Each size has three variants:
;   vv  - vector/vector:  icmp sgt + select should fold to vmax.vv
;   vx  - vector/scalar:  a splat of a scalar arg should fold to vmax.vx
;   vi  - vector/constant: the splat constant -3 is materialized into a0
;         with addi and vmax.vx is used (no immediate form is selected here)
define <vscale x 1 x i8> @vmax_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 1 x i8> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vmax_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i8> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vmax_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i8> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vmax_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 2 x i8> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmax_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i8> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmax_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i8> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vmax_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 4 x i8> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmax_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i8> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmax_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i8> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vmax_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 8 x i8> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmax_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i8> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmax_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i8> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
ret <vscale x 8 x i8> %vc
}
; From m2 upward the second vv operand arrives in v10/v12/v16 because each
; argument occupies a wider register group.
define <vscale x 16 x i8> @vmax_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 16 x i8> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmax_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 16 x i8> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmax_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 16 x i8> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vmax_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 32 x i8> %va, %vb
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmax_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 32 x i8> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmax_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 32 x i8> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vmax_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 64 x i8> %va, %vb
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmax_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 64 x i8> %va, %splat
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmax_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 64 x i8> %va, %splat
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
ret <vscale x 64 x i8> %vc
}
; Signed-max selection tests, i16 element types (LMUL mf4 through m8).
; Same vv/vx/vi pattern as the i8 cases above: sgt-compare + select folds to
; vmax.vv / vmax.vx, and the -3 splat is materialized with addi + vmax.vx.
define <vscale x 1 x i16> @vmax_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 1 x i16> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmax_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i16> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmax_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i16> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vmax_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 2 x i16> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmax_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i16> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmax_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i16> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vmax_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 4 x i16> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmax_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i16> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmax_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i16> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vmax_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 8 x i16> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmax_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i16> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmax_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i16> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vmax_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 16 x i16> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vmax_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 16 x i16> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vmax_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 16 x i16> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vmax_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 32 x i16> %va, %vb
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmax_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 32 x i16> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmax_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 32 x i16> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
ret <vscale x 32 x i16> %vc
}
; Signed-max selection tests, i32 element types (LMUL mf2 through m8).
; Note the scalar %b parameters here carry no signext attribute: on rv32 an
; i32 already fills the GPR, so vmax.vx can consume a0 directly.
define <vscale x 1 x i32> @vmax_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 1 x i32> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vmax_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i32> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vmax_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i32> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vmax_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 2 x i32> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmax_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i32> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmax_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i32> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vmax_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 4 x i32> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmax_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i32> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmax_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i32> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vmax_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 8 x i32> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmax_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i32> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmax_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i32> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vmax_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 16 x i32> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmax_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 16 x i32> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmax_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 16 x i32> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
ret <vscale x 16 x i32> %vc
}
; Signed-max selection tests, i64 element types (LMUL m1 through m8).
; This file's RUN line targets riscv32, so an i64 scalar splat cannot come
; from a single GPR: the vx cases spill the two 32-bit halves (a1:a0) to the
; stack and broadcast them with a zero-stride vlse64.v, then use vmax.vv.
; The vi cases still fit -3 in one sign-extended GPR and keep vmax.vx.
define <vscale x 1 x i64> @vmax_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 1 x i64> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i64> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmax_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i64> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vmax_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 2 x i64> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i64> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmax_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i64> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vmax_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 4 x i64> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i64> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmax_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i64> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vmax_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 8 x i64> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i64> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmax_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i64> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vmax_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i8:
@ -697,11 +698,24 @@ define <vscale x 1 x i64> @vmax_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmax_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vmax.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 1 x i64> %va, %splat
@ -735,11 +749,24 @@ define <vscale x 2 x i64> @vmax_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmax_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmax.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 2 x i64> %va, %splat
@ -773,11 +800,24 @@ define <vscale x 4 x i64> @vmax_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmax_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vmax.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 4 x i64> %va, %splat
@ -811,11 +851,24 @@ define <vscale x 8 x i64> @vmax_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmax.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmax_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vmax.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp sgt <vscale x 8 x i64> %va, %splat

View File

@ -1,867 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vmax_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 1 x i8> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vmax_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i8> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vmax_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i8> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vmax_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 2 x i8> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmax_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i8> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmax_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i8> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vmax_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 4 x i8> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmax_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i8> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmax_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i8> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vmax_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 8 x i8> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmax_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i8> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmax_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i8> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vmax_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 16 x i8> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmax_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 16 x i8> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmax_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 16 x i8> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vmax_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 32 x i8> %va, %vb
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmax_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 32 x i8> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmax_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 32 x i8> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vmax_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 64 x i8> %va, %vb
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmax_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 64 x i8> %va, %splat
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmax_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 64 x i8> %va, %splat
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vmax_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 1 x i16> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmax_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i16> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmax_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i16> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vmax_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 2 x i16> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmax_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i16> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmax_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i16> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vmax_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 4 x i16> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmax_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i16> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmax_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i16> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vmax_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 8 x i16> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmax_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i16> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmax_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i16> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vmax_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 16 x i16> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vmax_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 16 x i16> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vmax_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 16 x i16> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vmax_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 32 x i16> %va, %vb
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmax_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 32 x i16> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmax_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 32 x i16> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vmax_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 1 x i32> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vmax_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i32> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vmax_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i32> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vmax_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 2 x i32> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmax_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i32> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmax_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i32> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vmax_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 4 x i32> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmax_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i32> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmax_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i32> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vmax_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 8 x i32> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmax_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i32> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmax_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i32> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vmax_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 16 x i32> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmax_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vmax_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 16 x i32> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmax_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 16 x i32> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vmax_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 1 x i64> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i64> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmax_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i64> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vmax_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 2 x i64> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i64> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmax_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i64> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vmax_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 4 x i64> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i64> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmax_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i64> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vmax_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 8 x i64> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i64> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmax_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i64> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vmax_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i8:
@ -697,11 +698,24 @@ define <vscale x 1 x i64> @vmax_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmax_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vmaxu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 1 x i64> %va, %splat
@ -735,11 +749,24 @@ define <vscale x 2 x i64> @vmax_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmax_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmaxu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 2 x i64> %va, %splat
@ -773,11 +800,24 @@ define <vscale x 4 x i64> @vmax_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmax_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vmaxu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 4 x i64> %va, %splat
@ -811,11 +851,24 @@ define <vscale x 8 x i64> @vmax_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmaxu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmax_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vmaxu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ugt <vscale x 8 x i64> %va, %splat

View File

@ -1,867 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vmin_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 1 x i8> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vmin_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i8> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vmin_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i8> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vmin_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 2 x i8> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmin_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i8> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmin_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i8> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vmin_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 4 x i8> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmin_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i8> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmin_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i8> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vmin_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 8 x i8> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmin_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i8> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmin_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i8> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vmin_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 16 x i8> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmin_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp slt <vscale x 16 x i8> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmin_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp slt <vscale x 16 x i8> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vmin_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 32 x i8> %va, %vb
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmin_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp slt <vscale x 32 x i8> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmin_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp slt <vscale x 32 x i8> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vmin_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 64 x i8> %va, %vb
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmin_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%cmp = icmp slt <vscale x 64 x i8> %va, %splat
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmin_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%cmp = icmp slt <vscale x 64 x i8> %va, %splat
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vmin_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 1 x i16> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmin_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i16> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmin_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i16> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vmin_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 2 x i16> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmin_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i16> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmin_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i16> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vmin_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 4 x i16> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmin_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i16> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmin_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i16> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vmin_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 8 x i16> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmin_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i16> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmin_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i16> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
ret <vscale x 8 x i16> %vc
}
; Signed min tests for scalable i16 vectors at LMUL m4/m8. Each test expresses
; min as icmp slt + select and expects llc to select a single vmin instruction.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
; regenerate them instead of hand-editing.

; vector-vector: expects vmin.vv on the two vector operands.
define <vscale x 16 x i16> @vmin_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 16 x i16> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb
ret <vscale x 16 x i16> %vc
}
; vector-scalar: the splatted scalar %b is matched to vmin.vx (scalar in a0).
define <vscale x 16 x i16> @vmin_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp slt <vscale x 16 x i16> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
ret <vscale x 16 x i16> %vc
}
; constant splat: -3 is materialized into a0 with addi and used via vmin.vx
; (per the autogenerated output, no immediate form is selected).
define <vscale x 16 x i16> @vmin_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp slt <vscale x 16 x i16> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
ret <vscale x 16 x i16> %vc
}
; Same three patterns at LMUL m8 (nxv32i16).
define <vscale x 32 x i16> @vmin_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 32 x i16> %va, %vb
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmin_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp slt <vscale x 32 x i16> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmin_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp slt <vscale x 32 x i16> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
ret <vscale x 32 x i16> %vc
}
; Signed min tests for scalable i32 vectors at LMUL mf2/m1: vv, vx (plain i32
; scalar, no signext needed), and a -3 constant splat materialized via addi.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 1 x i32> @vmin_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 1 x i32> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb
ret <vscale x 1 x i32> %vc
}
; splat of the scalar arg selects vmin.vx.
define <vscale x 1 x i32> @vmin_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i32> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
ret <vscale x 1 x i32> %vc
}
; -3 splat: constant goes through a0 (addi) and vmin.vx.
define <vscale x 1 x i32> @vmin_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i32> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
ret <vscale x 1 x i32> %vc
}
; Same three patterns at LMUL m1 (nxv2i32).
define <vscale x 2 x i32> @vmin_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 2 x i32> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmin_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i32> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmin_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i32> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
ret <vscale x 2 x i32> %vc
}
; Signed min tests for scalable i32 vectors at LMUL m2/m4: vv, vx, and -3
; constant splat (materialized into a0, then vmin.vx).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 4 x i32> @vmin_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 4 x i32> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmin_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i32> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmin_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i32> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
ret <vscale x 4 x i32> %vc
}
; Same three patterns at LMUL m4 (nxv8i32).
define <vscale x 8 x i32> @vmin_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 8 x i32> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmin_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i32> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmin_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i32> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
ret <vscale x 8 x i32> %vc
}
; Signed min tests for nxv16i32 (LMUL m8): vv, vx, and -3 constant splat.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 16 x i32> @vmin_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 16 x i32> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmin_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp slt <vscale x 16 x i32> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmin_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp slt <vscale x 16 x i32> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
ret <vscale x 16 x i32> %vc
}
; Signed min tests for scalable i64 vectors at LMUL m1/m2. The vx variants
; show the RV32 lowering: the i64 scalar arrives split in a0/a1, is stored to
; a 16-byte stack slot, splatted with a zero-stride vlse64.v, then combined
; with vmin.vv (RV32 has no 64-bit GPR for vmin.vx).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 1 x i64> @vmin_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 1 x i64> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb
ret <vscale x 1 x i64> %vc
}
; i64 scalar splat on RV32: stack store of both halves + zero-stride load.
define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i64> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
ret <vscale x 1 x i64> %vc
}
; -3 fits in 12-bit addi even for i64, so no stack round-trip is needed.
define <vscale x 1 x i64> @vmin_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmin_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i64> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
ret <vscale x 1 x i64> %vc
}
; Same three patterns at LMUL m2 (nxv2i64).
define <vscale x 2 x i64> @vmin_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmin_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 2 x i64> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vmin.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i64> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmin_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmin_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i64> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
ret <vscale x 2 x i64> %vc
}
; Signed min tests for scalable i64 vectors at LMUL m4/m8. The vx variants use
; the RV32 stack + zero-stride vlse64.v splat sequence (see m1/m2 tests above
; in the same file for the same pattern).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 4 x i64> @vmin_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmin_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 4 x i64> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vmin.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i64> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmin_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmin_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i64> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
ret <vscale x 4 x i64> %vc
}
; Same three patterns at LMUL m8 (nxv8i64).
define <vscale x 8 x i64> @vmin_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmin_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmin.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 8 x i64> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vmin.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i64> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmin_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmin_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i64> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vmin_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i8:
@ -697,11 +698,24 @@ define <vscale x 1 x i64> @vmin_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmin_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vmin.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp slt <vscale x 1 x i64> %va, %splat
@ -735,11 +749,24 @@ define <vscale x 2 x i64> @vmin_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmin_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmin.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp slt <vscale x 2 x i64> %va, %splat
@ -773,11 +800,24 @@ define <vscale x 4 x i64> @vmin_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmin_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vmin.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp slt <vscale x 4 x i64> %va, %splat
@ -811,11 +851,24 @@ define <vscale x 8 x i64> @vmin_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmin.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmin_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vmin.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp slt <vscale x 8 x i64> %va, %splat

View File

@ -1,867 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; Unsigned min tests for scalable i8 vectors at LMUL mf8/mf4. Despite the
; vmin_* function names, these use icmp ult + select and expect vminu.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 1 x i8> @vmin_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 1 x i8> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
ret <vscale x 1 x i8> %vc
}
; splat of the scalar arg selects vminu.vx.
define <vscale x 1 x i8> @vmin_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i8> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
ret <vscale x 1 x i8> %vc
}
; -3 splat: the constant is materialized into a0 and used via vminu.vx.
define <vscale x 1 x i8> @vmin_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i8> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
ret <vscale x 1 x i8> %vc
}
; Same three patterns at LMUL mf4 (nxv2i8).
define <vscale x 2 x i8> @vmin_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 2 x i8> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmin_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i8> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmin_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i8> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
ret <vscale x 2 x i8> %vc
}
; Unsigned min tests for scalable i8 vectors at LMUL mf2/m1 (icmp ult +
; select, lowered to vminu.vv / vminu.vx).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 4 x i8> @vmin_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 4 x i8> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmin_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i8> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmin_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i8> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
ret <vscale x 4 x i8> %vc
}
; Same three patterns at LMUL m1 (nxv8i8).
define <vscale x 8 x i8> @vmin_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 8 x i8> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmin_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i8> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmin_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i8> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
ret <vscale x 8 x i8> %vc
}
; Unsigned min tests for scalable i8 vectors at LMUL m2/m4 (icmp ult +
; select, lowered to vminu.vv / vminu.vx).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 16 x i8> @vmin_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 16 x i8> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmin_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ult <vscale x 16 x i8> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmin_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ult <vscale x 16 x i8> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
ret <vscale x 16 x i8> %vc
}
; Same three patterns at LMUL m4 (nxv32i8).
define <vscale x 32 x i8> @vmin_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 32 x i8> %va, %vb
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmin_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp ult <vscale x 32 x i8> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmin_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp ult <vscale x 32 x i8> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
ret <vscale x 32 x i8> %vc
}
; Unsigned min tests for nxv64i8 (LMUL m8): vv, vx, and -3 constant splat.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 64 x i8> @vmin_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 64 x i8> %va, %vb
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmin_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmin_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%cmp = icmp ult <vscale x 64 x i8> %va, %splat
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmin_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmin_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -3, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%cmp = icmp ult <vscale x 64 x i8> %va, %splat
%vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
ret <vscale x 64 x i8> %vc
}
; Unsigned min tests for scalable i16 vectors at LMUL mf4 (full triple) and
; the first mf2 test (icmp ult + select, lowered to vminu.vv / vminu.vx).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
define <vscale x 1 x i16> @vmin_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 1 x i16> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmin_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i16> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmin_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i16> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
ret <vscale x 1 x i16> %vc
}
; First of the mf2 (nxv2i16) tests; the vx/vi variants follow the same shape.
define <vscale x 2 x i16> @vmin_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 2 x i16> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmin_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i16> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmin_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i16> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vmin_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 4 x i16> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmin_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i16> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmin_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i16> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vmin_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 8 x i16> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmin_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i16> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmin_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i16> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vmin_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 16 x i16> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vmin_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ult <vscale x 16 x i16> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vmin_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ult <vscale x 16 x i16> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vmin_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmin_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 32 x i16> %va, %vb
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmin_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmin_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp ult <vscale x 32 x i16> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmin_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmin_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -3, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%cmp = icmp ult <vscale x 32 x i16> %va, %splat
%vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vmin_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 1 x i32> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vmin_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i32> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vmin_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i32> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vmin_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 2 x i32> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmin_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i32> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmin_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i32> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vmin_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 4 x i32> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmin_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i32> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmin_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i32> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vmin_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 8 x i32> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmin_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i32> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmin_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i32> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vmin_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmin_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 16 x i32> %va, %vb
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmin_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vmin_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ult <vscale x 16 x i32> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmin_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmin_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -3, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%cmp = icmp ult <vscale x 16 x i32> %va, %splat
%vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vmin_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 1 x i64> %va, %vb
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i64> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmin_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmin_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i64> %va, %splat
%vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vmin_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmin_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v10
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 2 x i64> %va, %vb
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vminu.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i64> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmin_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmin_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i64> %va, %splat
%vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vmin_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmin_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v12
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 4 x i64> %va, %vb
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vminu.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i64> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmin_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmin_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i64> %va, %splat
%vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vmin_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmin_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vminu.vv v8, v8, v16
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 8 x i64> %va, %vb
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vminu.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i64> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmin_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmin_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -3
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -3, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i64> %va, %splat
%vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vmin_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmin_vv_nxv1i8:
@ -697,11 +698,24 @@ define <vscale x 1 x i64> @vmin_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmin_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vminu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%cmp = icmp ult <vscale x 1 x i64> %va, %splat
@ -735,11 +749,24 @@ define <vscale x 2 x i64> @vmin_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmin_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vminu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%cmp = icmp ult <vscale x 2 x i64> %va, %splat
@ -773,11 +800,24 @@ define <vscale x 4 x i64> @vmin_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmin_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vminu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%cmp = icmp ult <vscale x 4 x i64> %va, %splat
@ -811,11 +851,24 @@ define <vscale x 8 x i64> @vmin_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmin_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vminu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmin_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vminu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%cmp = icmp ult <vscale x 8 x i64> %va, %splat

View File

@ -1,896 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 1 x i8> %va, %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vmul_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 2 x i8> %va, %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vmul_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 4 x i8> %va, %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vmul_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 8 x i8> %va, %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vmul_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = mul <vscale x 16 x i8> %va, %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = mul <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vmul_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = mul <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = mul <vscale x 32 x i8> %va, %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = mul <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vmul_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = mul <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = mul <vscale x 64 x i8> %va, %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = mul <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vmul_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = mul <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 1 x i16> %va, %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vmul_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 2 x i16> %va, %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vmul_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 4 x i16> %va, %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vmul_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = mul <vscale x 8 x i16> %va, %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vmul_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = mul <vscale x 16 x i16> %va, %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = mul <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vmul_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = mul <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = mul <vscale x 32 x i16> %va, %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = mul <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vmul_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = mul <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vmul_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 1 x i32> %va, %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vmul_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vmul_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 2 x i32> %va, %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vmul_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vmul_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = mul <vscale x 4 x i32> %va, %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vmul_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = mul <vscale x 8 x i32> %va, %vb
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vmul_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vmul_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = mul <vscale x 16 x i32> %va, %vb
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = mul <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vmul_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = mul <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = mul <vscale x 1 x i64> %va, %vb
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmul_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmul_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 2, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vmul_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 4
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = mul <vscale x 2 x i64> %va, %vb
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmul_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmul_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 2, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vmul_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 4
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = mul <vscale x 4 x i64> %va, %vb
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmul_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmul_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vmul_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 4
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = mul <vscale x 8 x i64> %va, %vb
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmul_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmul_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vmul_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 4
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i8:
@ -642,11 +643,24 @@ define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmul_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vmul.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = mul <vscale x 1 x i64> %va, %splat
@ -701,11 +715,24 @@ define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmul_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmul.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = mul <vscale x 2 x i64> %va, %splat
@ -760,11 +787,24 @@ define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmul_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vmul.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = mul <vscale x 4 x i64> %va, %splat
@ -819,11 +859,24 @@ define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vmul_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vmul.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = mul <vscale x 8 x i64> %va, %splat

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vor_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vor_vx_nxv1i8:
@ -884,11 +885,24 @@ define <vscale x 16 x i32> @vor_vx_nxv16i32_2(<vscale x 16 x i32> %va) {
}
define <vscale x 1 x i64> @vor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vor_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vor_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vor.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = or <vscale x 1 x i64> %va, %splat
@ -933,11 +947,24 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64_2(<vscale x 1 x i64> %va) {
}
define <vscale x 2 x i64> @vor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vor_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vor_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vor.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = or <vscale x 2 x i64> %va, %splat
@ -982,11 +1009,24 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64_2(<vscale x 2 x i64> %va) {
}
define <vscale x 4 x i64> @vor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vor_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vor_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vor.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = or <vscale x 4 x i64> %va, %splat
@ -1031,11 +1071,24 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64_2(<vscale x 4 x i64> %va) {
}
define <vscale x 8 x i64> @vor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vor_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vor_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vor.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = or <vscale x 8 x i64> %va, %splat

View File

@ -1,978 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 1 x i8> %va, %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vrem_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v9, v9, v8
; CHECK-NEXT: vsra.vi v9, v9, 2
; CHECK-NEXT: vsrl.vi v10, v9, 7
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vrem_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 2 x i8> %va, %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vrem_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v9, v9, v8
; CHECK-NEXT: vsra.vi v9, v9, 2
; CHECK-NEXT: vsrl.vi v10, v9, 7
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 4 x i8> %va, %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vrem_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v9, v9, v8
; CHECK-NEXT: vsra.vi v9, v9, 2
; CHECK-NEXT: vsrl.vi v10, v9, 7
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vrem_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 8 x i8> %va, %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vrem_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v9, v9, v8
; CHECK-NEXT: vsra.vi v9, v9, 2
; CHECK-NEXT: vsrl.vi v10, v9, 7
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vrem_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = srem <vscale x 16 x i8> %va, %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vrem_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = srem <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: vsub.vv v10, v10, v8
; CHECK-NEXT: vsra.vi v10, v10, 2
; CHECK-NEXT: vsrl.vi v12, v10, 7
; CHECK-NEXT: vadd.vv v10, v10, v12
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = srem <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vrem_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = srem <vscale x 32 x i8> %va, %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vrem_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = srem <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: vsub.vv v12, v12, v8
; CHECK-NEXT: vsra.vi v12, v12, 2
; CHECK-NEXT: vsrl.vi v16, v12, 7
; CHECK-NEXT: vadd.vv v12, v12, v16
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = srem <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vrem_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = srem <vscale x 64 x i8> %va, %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vrem_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = srem <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 109
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: vsub.vv v16, v16, v8
; CHECK-NEXT: vsra.vi v16, v16, 2
; CHECK-NEXT: vsrl.vi v24, v16, 7
; CHECK-NEXT: vadd.vv v16, v16, v24
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = srem <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vrem_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 1 x i16> %va, %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vrem_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vsrl.vi v10, v9, 15
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vrem_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 2 x i16> %va, %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vrem_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vsrl.vi v10, v9, 15
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vrem_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 4 x i16> %va, %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vrem_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vsrl.vi v10, v9, 15
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vrem_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = srem <vscale x 8 x i16> %va, %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vrem_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: vsra.vi v10, v10, 1
; CHECK-NEXT: vsrl.vi v12, v10, 15
; CHECK-NEXT: vadd.vv v10, v10, v12
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vrem_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = srem <vscale x 16 x i16> %va, %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vrem_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = srem <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: vsra.vi v12, v12, 1
; CHECK-NEXT: vsrl.vi v16, v12, 15
; CHECK-NEXT: vadd.vv v12, v12, v16
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = srem <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vrem_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = srem <vscale x 32 x i16> %va, %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vrem_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = srem <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: vsra.vi v16, v16, 1
; CHECK-NEXT: vsrl.vi v24, v16, 15
; CHECK-NEXT: vadd.vv v16, v16, v24
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = srem <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vrem_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 1 x i32> %va, %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vrem_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vrem_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v9, v9, v8
; CHECK-NEXT: vsrl.vi v10, v9, 31
; CHECK-NEXT: vsra.vi v9, v9, 2
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vrem_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 2 x i32> %va, %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vrem_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vrem_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v9, v9, v8
; CHECK-NEXT: vsrl.vi v10, v9, 31
; CHECK-NEXT: vsra.vi v9, v9, 2
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
; Same vector-vector srem check as the nxv2i32 case, at LMUL=2 (e32, m2).
define <vscale x 4 x i32> @vrem_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = srem <vscale x 4 x i32> %va, %vb
ret <vscale x 4 x i32> %vc
}
; Splatted-scalar srem at LMUL=2: splat idiom should fold into vrem.vx.
define <vscale x 4 x i32> @vrem_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vrem_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
; srem by constant -7 at LMUL=2: same magic-number multiply/shift/vnmsac
; lowering as the nxv2i32 case, with m2 register grouping (v10/v12 temps).
define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: vsub.vv v10, v10, v8
; CHECK-NEXT: vsrl.vi v12, v10, 31
; CHECK-NEXT: vsra.vi v10, v10, 2
; CHECK-NEXT: vadd.vv v10, v10, v12
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
; Vector-vector srem at LMUL=4 (e32, m4): expects a single vrem.vv.
define <vscale x 8 x i32> @vrem_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = srem <vscale x 8 x i32> %va, %vb
ret <vscale x 8 x i32> %vc
}
; Splatted-scalar srem at LMUL=4: splat idiom should fold into vrem.vx.
define <vscale x 8 x i32> @vrem_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vrem_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
; srem by constant -7 at LMUL=4: magic-number multiply/shift/vnmsac lowering
; with m4 register grouping (v12/v16 temps).
define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: vsub.vv v12, v12, v8
; CHECK-NEXT: vsrl.vi v16, v12, 31
; CHECK-NEXT: vsra.vi v12, v12, 2
; CHECK-NEXT: vadd.vv v12, v12, v16
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
; Vector-vector srem at the maximum LMUL=8 grouping (e32, m8).
define <vscale x 16 x i32> @vrem_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = srem <vscale x 16 x i32> %va, %vb
ret <vscale x 16 x i32> %vc
}
; Splatted-scalar srem at LMUL=8: splat idiom should fold into vrem.vx.
define <vscale x 16 x i32> @vrem_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vrem_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = srem <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
; srem by constant -7 at LMUL=8: magic-number multiply/shift/vnmsac lowering
; with m8 register grouping (v16/v24 temps).
define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addi a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: vsub.vv v16, v16, v8
; CHECK-NEXT: vsrl.vi v24, v16, 31
; CHECK-NEXT: vsra.vi v16, v16, 2
; CHECK-NEXT: vadd.vv v16, v16, v24
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = srem <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
; Vector-vector srem with 64-bit elements (e64, m1): still a single vrem.vv.
define <vscale x 1 x i64> @vrem_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vrem_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = srem <vscale x 1 x i64> %va, %vb
ret <vscale x 1 x i64> %vc
}
; Splatting an i64 scalar on rv32 (this RUN line targets riscv32): the 64-bit
; value arrives in the a1:a0 pair, is spilled to the stack as two sw stores,
; splatted back with a zero-stride vlse64, then used by vrem.vv — no vrem.vx
; is possible since the scalar doesn't fit a single GPR.
define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vrem_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
; i64 srem by constant -7 on rv32: the 64-bit magic constant is materialized
; as two 32-bit halves stored to the stack and splatted via vlse64, then the
; quotient estimate comes from vmulh.vv, sign-corrected with vsrl.vx/vsra.vi,
; and the remainder is formed by vnmsac (va - q*7).
define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vrem_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 748983
; CHECK-NEXT: addi a0, a0, -586
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: lui a0, 898779
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vmulh.vv v9, v8, v9
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v10, v9, a0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
; Vector-vector srem, e64 elements at LMUL=2.
define <vscale x 2 x i64> @vrem_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vrem_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = srem <vscale x 2 x i64> %va, %vb
ret <vscale x 2 x i64> %vc
}
; i64 scalar splat on rv32 at LMUL=2: same stack-spill + vlse64 splat pattern
; as the nxv1i64 case, feeding vrem.vv.
define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vrem_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vrem.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
; i64 srem by constant -7 on rv32 at LMUL=2: stack-materialized magic
; constant, vmulh.vv quotient estimate, shift correction, vnmsac remainder.
define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vrem_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 748983
; CHECK-NEXT: addi a0, a0, -586
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: lui a0, 898779
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vmulh.vv v10, v8, v10
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v12, v10, a0
; CHECK-NEXT: vsra.vi v10, v10, 1
; CHECK-NEXT: vadd.vv v10, v10, v12
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
; Vector-vector srem, e64 elements at LMUL=4.
define <vscale x 4 x i64> @vrem_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vrem_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = srem <vscale x 4 x i64> %va, %vb
ret <vscale x 4 x i64> %vc
}
; i64 scalar splat on rv32 at LMUL=4: stack-spill + vlse64 splat feeding
; vrem.vv, as in the smaller i64 cases.
define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vrem_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vrem.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
; i64 srem by constant -7 on rv32 at LMUL=4: stack-materialized magic
; constant, vmulh.vv quotient estimate, shift correction, vnmsac remainder.
define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vrem_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 748983
; CHECK-NEXT: addi a0, a0, -586
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: lui a0, 898779
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vmulh.vv v12, v8, v12
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v16, v12, a0
; CHECK-NEXT: vsra.vi v12, v12, 1
; CHECK-NEXT: vadd.vv v12, v12, v16
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
; Vector-vector srem, e64 elements at the maximum LMUL=8.
define <vscale x 8 x i64> @vrem_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vrem_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vrem.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = srem <vscale x 8 x i64> %va, %vb
ret <vscale x 8 x i64> %vc
}
; i64 scalar splat on rv32 at LMUL=8: stack-spill + vlse64 splat feeding
; vrem.vv.
define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vrem_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vrem.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
; i64 srem by constant -7 on rv32 at LMUL=8: stack-materialized magic
; constant, vmulh.vv quotient estimate, shift correction, vnmsac remainder.
define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vrem_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: lui a0, 748983
; CHECK-NEXT: addi a0, a0, -586
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: lui a0, 898779
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vmulh.vv v16, v8, v16
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v24, v16, a0
; CHECK-NEXT: vsra.vi v16, v16, 1
; CHECK-NEXT: vadd.vv v16, v16, v24
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv1i8:
@ -311,18 +312,31 @@ define <vscale x 1 x i16> @vrem_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
}
define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vsrl.vi v10, v9, 15
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv1i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; RV32-NEXT: vmulh.vx v9, v8, a0
; RV32-NEXT: vsra.vi v9, v9, 1
; RV32-NEXT: vsrl.vi v10, v9, 15
; RV32-NEXT: vadd.vv v9, v9, v10
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv1i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; RV64-NEXT: vmulh.vx v9, v8, a0
; RV64-NEXT: vsra.vi v9, v9, 1
; RV64-NEXT: vsrl.vi v10, v9, 15
; RV64-NEXT: vadd.vv v9, v9, v10
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i16> %va, %splat
@ -352,18 +366,31 @@ define <vscale x 2 x i16> @vrem_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
}
define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vsrl.vi v10, v9, 15
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv2i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; RV32-NEXT: vmulh.vx v9, v8, a0
; RV32-NEXT: vsra.vi v9, v9, 1
; RV32-NEXT: vsrl.vi v10, v9, 15
; RV32-NEXT: vadd.vv v9, v9, v10
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv2i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; RV64-NEXT: vmulh.vx v9, v8, a0
; RV64-NEXT: vsra.vi v9, v9, 1
; RV64-NEXT: vsrl.vi v10, v9, 15
; RV64-NEXT: vadd.vv v9, v9, v10
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i16> %va, %splat
@ -393,18 +420,31 @@ define <vscale x 4 x i16> @vrem_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
}
define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vsrl.vi v10, v9, 15
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv4i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; RV32-NEXT: vmulh.vx v9, v8, a0
; RV32-NEXT: vsra.vi v9, v9, 1
; RV32-NEXT: vsrl.vi v10, v9, 15
; RV32-NEXT: vadd.vv v9, v9, v10
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv4i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; RV64-NEXT: vmulh.vx v9, v8, a0
; RV64-NEXT: vsra.vi v9, v9, 1
; RV64-NEXT: vsrl.vi v10, v9, 15
; RV64-NEXT: vadd.vv v9, v9, v10
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i16> %va, %splat
@ -434,18 +474,31 @@ define <vscale x 8 x i16> @vrem_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
}
define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: vsra.vi v10, v10, 1
; CHECK-NEXT: vsrl.vi v12, v10, 15
; CHECK-NEXT: vadd.vv v10, v10, v12
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv8i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT: vmulh.vx v10, v8, a0
; RV32-NEXT: vsra.vi v10, v10, 1
; RV32-NEXT: vsrl.vi v12, v10, 15
; RV32-NEXT: vadd.vv v10, v10, v12
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv8i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV64-NEXT: vmulh.vx v10, v8, a0
; RV64-NEXT: vsra.vi v10, v10, 1
; RV64-NEXT: vsrl.vi v12, v10, 15
; RV64-NEXT: vadd.vv v10, v10, v12
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i16> %va, %splat
@ -475,18 +528,31 @@ define <vscale x 16 x i16> @vrem_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
}
define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: vsra.vi v12, v12, 1
; CHECK-NEXT: vsrl.vi v16, v12, 15
; CHECK-NEXT: vadd.vv v12, v12, v16
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv16i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; RV32-NEXT: vmulh.vx v12, v8, a0
; RV32-NEXT: vsra.vi v12, v12, 1
; RV32-NEXT: vsrl.vi v16, v12, 15
; RV32-NEXT: vadd.vv v12, v12, v16
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv16i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; RV64-NEXT: vmulh.vx v12, v8, a0
; RV64-NEXT: vsra.vi v12, v12, 1
; RV64-NEXT: vsrl.vi v16, v12, 15
; RV64-NEXT: vadd.vv v12, v12, v16
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = srem <vscale x 16 x i16> %va, %splat
@ -516,18 +582,31 @@ define <vscale x 32 x i16> @vrem_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
}
define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vrem_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048571
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: vsra.vi v16, v16, 1
; CHECK-NEXT: vsrl.vi v24, v16, 15
; CHECK-NEXT: vadd.vv v16, v16, v24
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv32i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 1048571
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; RV32-NEXT: vmulh.vx v16, v8, a0
; RV32-NEXT: vsra.vi v16, v16, 1
; RV32-NEXT: vsrl.vi v24, v16, 15
; RV32-NEXT: vadd.vv v16, v16, v24
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv32i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1048571
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; RV64-NEXT: vmulh.vx v16, v8, a0
; RV64-NEXT: vsra.vi v16, v16, 1
; RV64-NEXT: vsrl.vi v24, v16, 15
; RV64-NEXT: vadd.vv v16, v16, v24
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = srem <vscale x 32 x i16> %va, %splat
@ -557,19 +636,33 @@ define <vscale x 1 x i32> @vrem_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
}
define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v9, v9, v8
; CHECK-NEXT: vsra.vi v9, v9, 2
; CHECK-NEXT: vsrl.vi v10, v9, 31
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv1i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; RV32-NEXT: vmulh.vx v9, v8, a0
; RV32-NEXT: vsub.vv v9, v9, v8
; RV32-NEXT: vsrl.vi v10, v9, 31
; RV32-NEXT: vsra.vi v9, v9, 2
; RV32-NEXT: vadd.vv v9, v9, v10
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv1i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; RV64-NEXT: vmulh.vx v9, v8, a0
; RV64-NEXT: vsub.vv v9, v9, v8
; RV64-NEXT: vsra.vi v9, v9, 2
; RV64-NEXT: vsrl.vi v10, v9, 31
; RV64-NEXT: vadd.vv v9, v9, v10
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i32> %va, %splat
@ -599,19 +692,33 @@ define <vscale x 2 x i32> @vrem_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
}
define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: vsub.vv v9, v9, v8
; CHECK-NEXT: vsra.vi v9, v9, 2
; CHECK-NEXT: vsrl.vi v10, v9, 31
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv2i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; RV32-NEXT: vmulh.vx v9, v8, a0
; RV32-NEXT: vsub.vv v9, v9, v8
; RV32-NEXT: vsrl.vi v10, v9, 31
; RV32-NEXT: vsra.vi v9, v9, 2
; RV32-NEXT: vadd.vv v9, v9, v10
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv2i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; RV64-NEXT: vmulh.vx v9, v8, a0
; RV64-NEXT: vsub.vv v9, v9, v8
; RV64-NEXT: vsra.vi v9, v9, 2
; RV64-NEXT: vsrl.vi v10, v9, 31
; RV64-NEXT: vadd.vv v9, v9, v10
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i32> %va, %splat
@ -641,19 +748,33 @@ define <vscale x 4 x i32> @vrem_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
}
define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: vsub.vv v10, v10, v8
; CHECK-NEXT: vsra.vi v10, v10, 2
; CHECK-NEXT: vsrl.vi v12, v10, 31
; CHECK-NEXT: vadd.vv v10, v10, v12
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv4i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; RV32-NEXT: vmulh.vx v10, v8, a0
; RV32-NEXT: vsub.vv v10, v10, v8
; RV32-NEXT: vsrl.vi v12, v10, 31
; RV32-NEXT: vsra.vi v10, v10, 2
; RV32-NEXT: vadd.vv v10, v10, v12
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv4i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; RV64-NEXT: vmulh.vx v10, v8, a0
; RV64-NEXT: vsub.vv v10, v10, v8
; RV64-NEXT: vsra.vi v10, v10, 2
; RV64-NEXT: vsrl.vi v12, v10, 31
; RV64-NEXT: vadd.vv v10, v10, v12
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i32> %va, %splat
@ -683,19 +804,33 @@ define <vscale x 8 x i32> @vrem_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
}
define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: vsub.vv v12, v12, v8
; CHECK-NEXT: vsra.vi v12, v12, 2
; CHECK-NEXT: vsrl.vi v16, v12, 31
; CHECK-NEXT: vadd.vv v12, v12, v16
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv8i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vmulh.vx v12, v8, a0
; RV32-NEXT: vsub.vv v12, v12, v8
; RV32-NEXT: vsrl.vi v16, v12, 31
; RV32-NEXT: vsra.vi v12, v12, 2
; RV32-NEXT: vadd.vv v12, v12, v16
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv8i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV64-NEXT: vmulh.vx v12, v8, a0
; RV64-NEXT: vsub.vv v12, v12, v8
; RV64-NEXT: vsra.vi v12, v12, 2
; RV64-NEXT: vsrl.vi v16, v12, 31
; RV64-NEXT: vadd.vv v12, v12, v16
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i32> %va, %splat
@ -725,19 +860,33 @@ define <vscale x 16 x i32> @vrem_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
}
define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vrem_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 449390
; CHECK-NEXT: addiw a0, a0, -1171
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: vsub.vv v16, v16, v8
; CHECK-NEXT: vsra.vi v16, v16, 2
; CHECK-NEXT: vsrl.vi v24, v16, 31
; CHECK-NEXT: vadd.vv v16, v16, v24
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv16i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 449390
; RV32-NEXT: addi a0, a0, -1171
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV32-NEXT: vmulh.vx v16, v8, a0
; RV32-NEXT: vsub.vv v16, v16, v8
; RV32-NEXT: vsrl.vi v24, v16, 31
; RV32-NEXT: vsra.vi v16, v16, 2
; RV32-NEXT: vadd.vv v16, v16, v24
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv16i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 449390
; RV64-NEXT: addiw a0, a0, -1171
; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV64-NEXT: vmulh.vx v16, v8, a0
; RV64-NEXT: vsub.vv v16, v16, v8
; RV64-NEXT: vsra.vi v16, v16, 2
; RV64-NEXT: vsrl.vi v24, v16, 31
; RV64-NEXT: vadd.vv v16, v16, v24
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = srem <vscale x 16 x i32> %va, %splat
@ -755,11 +904,24 @@ define <vscale x 1 x i64> @vrem_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vrem_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vrem.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i64> %va, %splat
@ -767,25 +929,48 @@ define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
}
define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vrem_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1029851
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmulh.vx v9, v8, a0
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v10, v9, a0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vadd.vv v9, v9, v10
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv1i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 748983
; RV32-NEXT: addi a0, a0, -586
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: lui a0, 898779
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmulh.vv v9, v8, v9
; RV32-NEXT: addi a0, zero, 63
; RV32-NEXT: vsrl.vx v10, v9, a0
; RV32-NEXT: vsra.vi v9, v9, 1
; RV32-NEXT: vadd.vv v9, v9, v10
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv1i64_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1029851
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vmulh.vx v9, v8, a0
; RV64-NEXT: addi a0, zero, 63
; RV64-NEXT: vsrl.vx v10, v9, a0
; RV64-NEXT: vsra.vi v9, v9, 1
; RV64-NEXT: vadd.vv v9, v9, v10
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = srem <vscale x 1 x i64> %va, %splat
@ -803,11 +988,24 @@ define <vscale x 2 x i64> @vrem_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vrem_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vrem.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i64> %va, %splat
@ -815,25 +1013,48 @@ define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
}
define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vrem_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1029851
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmulh.vx v10, v8, a0
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v12, v10, a0
; CHECK-NEXT: vsra.vi v10, v10, 1
; CHECK-NEXT: vadd.vv v10, v10, v12
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv2i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 748983
; RV32-NEXT: addi a0, a0, -586
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: lui a0, 898779
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmulh.vv v10, v8, v10
; RV32-NEXT: addi a0, zero, 63
; RV32-NEXT: vsrl.vx v12, v10, a0
; RV32-NEXT: vsra.vi v10, v10, 1
; RV32-NEXT: vadd.vv v10, v10, v12
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv2i64_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1029851
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmulh.vx v10, v8, a0
; RV64-NEXT: addi a0, zero, 63
; RV64-NEXT: vsrl.vx v12, v10, a0
; RV64-NEXT: vsra.vi v10, v10, 1
; RV64-NEXT: vadd.vv v10, v10, v12
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = srem <vscale x 2 x i64> %va, %splat
@ -851,11 +1072,24 @@ define <vscale x 4 x i64> @vrem_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vrem_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vrem.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i64> %va, %splat
@ -863,25 +1097,48 @@ define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
}
define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vrem_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1029851
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmulh.vx v12, v8, a0
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v16, v12, a0
; CHECK-NEXT: vsra.vi v12, v12, 1
; CHECK-NEXT: vadd.vv v12, v12, v16
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv4i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 748983
; RV32-NEXT: addi a0, a0, -586
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: lui a0, 898779
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmulh.vv v12, v8, v12
; RV32-NEXT: addi a0, zero, 63
; RV32-NEXT: vsrl.vx v16, v12, a0
; RV32-NEXT: vsra.vi v12, v12, 1
; RV32-NEXT: vadd.vv v12, v12, v16
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv4i64_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1029851
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vmulh.vx v12, v8, a0
; RV64-NEXT: addi a0, zero, 63
; RV64-NEXT: vsrl.vx v16, v12, a0
; RV64-NEXT: vsra.vi v12, v12, 1
; RV64-NEXT: vadd.vv v12, v12, v16
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = srem <vscale x 4 x i64> %va, %splat
@ -899,11 +1156,24 @@ define <vscale x 8 x i64> @vrem_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vrem_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vrem.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vrem.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i64> %va, %splat
@ -911,25 +1181,48 @@ define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
}
define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vrem_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1029851
; CHECK-NEXT: addiw a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: slli a0, a0, 12
; CHECK-NEXT: addi a0, a0, 1755
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmulh.vx v16, v8, a0
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsrl.vx v24, v16, a0
; CHECK-NEXT: vsra.vi v16, v16, 1
; CHECK-NEXT: vadd.vv v16, v16, v24
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
; RV32-LABEL: vrem_vi_nxv8i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 748983
; RV32-NEXT: addi a0, a0, -586
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: lui a0, 898779
; RV32-NEXT: addi a0, a0, 1755
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmulh.vv v16, v8, v16
; RV32-NEXT: addi a0, zero, 63
; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vsra.vi v16, v16, 1
; RV32-NEXT: vadd.vv v16, v16, v24
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vi_nxv8i64_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 1029851
; RV64-NEXT: addiw a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: slli a0, a0, 12
; RV64-NEXT: addi a0, a0, 1755
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vmulh.vx v16, v8, a0
; RV64-NEXT: addi a0, zero, 63
; RV64-NEXT: vsrl.vx v24, v16, a0
; RV64-NEXT: vsra.vi v16, v16, 1
; RV64-NEXT: vadd.vv v16, v16, v24
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = srem <vscale x 8 x i64> %va, %splat

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vremu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vremu_vv_nxv1i8:
@ -290,16 +291,27 @@ define <vscale x 1 x i16> @vremu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext
}
define <vscale x 1 x i16> @vremu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vremu_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmulhu.vx v9, v8, a0
; CHECK-NEXT: vsrl.vi v9, v9, 13
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv1i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; RV32-NEXT: vmulhu.vx v9, v8, a0
; RV32-NEXT: vsrl.vi v9, v9, 13
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv1i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; RV64-NEXT: vmulhu.vx v9, v8, a0
; RV64-NEXT: vsrl.vi v9, v9, 13
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = urem <vscale x 1 x i16> %va, %splat
@ -329,16 +341,27 @@ define <vscale x 2 x i16> @vremu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext
}
define <vscale x 2 x i16> @vremu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vremu_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmulhu.vx v9, v8, a0
; CHECK-NEXT: vsrl.vi v9, v9, 13
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv2i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; RV32-NEXT: vmulhu.vx v9, v8, a0
; RV32-NEXT: vsrl.vi v9, v9, 13
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv2i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; RV64-NEXT: vmulhu.vx v9, v8, a0
; RV64-NEXT: vsrl.vi v9, v9, 13
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = urem <vscale x 2 x i16> %va, %splat
@ -368,16 +391,27 @@ define <vscale x 4 x i16> @vremu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext
}
define <vscale x 4 x i16> @vremu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vremu_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v9, v8, a0
; CHECK-NEXT: vsrl.vi v9, v9, 13
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv4i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; RV32-NEXT: vmulhu.vx v9, v8, a0
; RV32-NEXT: vsrl.vi v9, v9, 13
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv4i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; RV64-NEXT: vmulhu.vx v9, v8, a0
; RV64-NEXT: vsrl.vi v9, v9, 13
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = urem <vscale x 4 x i16> %va, %splat
@ -407,16 +441,27 @@ define <vscale x 8 x i16> @vremu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext
}
define <vscale x 8 x i16> @vremu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vremu_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v10, v8, a0
; CHECK-NEXT: vsrl.vi v10, v10, 13
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv8i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT: vmulhu.vx v10, v8, a0
; RV32-NEXT: vsrl.vi v10, v10, 13
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv8i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV64-NEXT: vmulhu.vx v10, v8, a0
; RV64-NEXT: vsrl.vi v10, v10, 13
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = urem <vscale x 8 x i16> %va, %splat
@ -446,16 +491,27 @@ define <vscale x 16 x i16> @vremu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signe
}
define <vscale x 16 x i16> @vremu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vremu_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v12, v8, a0
; CHECK-NEXT: vsrl.vi v12, v12, 13
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv16i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; RV32-NEXT: vmulhu.vx v12, v8, a0
; RV32-NEXT: vsrl.vi v12, v12, 13
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv16i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; RV64-NEXT: vmulhu.vx v12, v8, a0
; RV64-NEXT: vsrl.vi v12, v12, 13
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = urem <vscale x 16 x i16> %va, %splat
@ -485,16 +541,27 @@ define <vscale x 32 x i16> @vremu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signe
}
define <vscale x 32 x i16> @vremu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vremu_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v16, v8, a0
; CHECK-NEXT: vsrl.vi v16, v16, 13
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv32i16_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 2
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; RV32-NEXT: vmulhu.vx v16, v8, a0
; RV32-NEXT: vsrl.vi v16, v16, 13
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv32i16_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 2
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; RV64-NEXT: vmulhu.vx v16, v8, a0
; RV64-NEXT: vsrl.vi v16, v16, 13
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = urem <vscale x 32 x i16> %va, %splat
@ -524,16 +591,27 @@ define <vscale x 1 x i32> @vremu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext
}
define <vscale x 1 x i32> @vremu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vremu_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmulhu.vx v9, v8, a0
; CHECK-NEXT: vsrl.vi v9, v9, 29
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv1i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; RV32-NEXT: vmulhu.vx v9, v8, a0
; RV32-NEXT: vsrl.vi v9, v9, 29
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv1i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; RV64-NEXT: vmulhu.vx v9, v8, a0
; RV64-NEXT: vsrl.vi v9, v9, 29
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = urem <vscale x 1 x i32> %va, %splat
@ -563,16 +641,27 @@ define <vscale x 2 x i32> @vremu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext
}
define <vscale x 2 x i32> @vremu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vremu_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v9, v8, a0
; CHECK-NEXT: vsrl.vi v9, v9, 29
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv2i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; RV32-NEXT: vmulhu.vx v9, v8, a0
; RV32-NEXT: vsrl.vi v9, v9, 29
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv2i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; RV64-NEXT: vmulhu.vx v9, v8, a0
; RV64-NEXT: vsrl.vi v9, v9, 29
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = urem <vscale x 2 x i32> %va, %splat
@ -602,16 +691,27 @@ define <vscale x 4 x i32> @vremu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext
}
define <vscale x 4 x i32> @vremu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vremu_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v10, v8, a0
; CHECK-NEXT: vsrl.vi v10, v10, 29
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv4i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; RV32-NEXT: vmulhu.vx v10, v8, a0
; RV32-NEXT: vsrl.vi v10, v10, 29
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv4i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; RV64-NEXT: vmulhu.vx v10, v8, a0
; RV64-NEXT: vsrl.vi v10, v10, 29
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = urem <vscale x 4 x i32> %va, %splat
@ -641,16 +741,27 @@ define <vscale x 8 x i32> @vremu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext
}
define <vscale x 8 x i32> @vremu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vremu_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v12, v8, a0
; CHECK-NEXT: vsrl.vi v12, v12, 29
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv8i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vmulhu.vx v12, v8, a0
; RV32-NEXT: vsrl.vi v12, v12, 29
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv8i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV64-NEXT: vmulhu.vx v12, v8, a0
; RV64-NEXT: vsrl.vi v12, v12, 29
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = urem <vscale x 8 x i32> %va, %splat
@ -680,16 +791,27 @@ define <vscale x 16 x i32> @vremu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signe
}
define <vscale x 16 x i32> @vremu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vremu_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 131072
; CHECK-NEXT: addiw a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v16, v8, a0
; CHECK-NEXT: vsrl.vi v16, v16, 29
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv16i32_0:
; RV32: # %bb.0:
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: addi a0, a0, 1
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV32-NEXT: vmulhu.vx v16, v8, a0
; RV32-NEXT: vsrl.vi v16, v16, 29
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv16i32_0:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, 131072
; RV64-NEXT: addiw a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV64-NEXT: vmulhu.vx v16, v8, a0
; RV64-NEXT: vsrl.vi v16, v16, 29
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = urem <vscale x 16 x i32> %va, %splat
@ -707,11 +829,24 @@ define <vscale x 1 x i64> @vremu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
}
define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vremu_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vremu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vremu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = urem <vscale x 1 x i64> %va, %splat
@ -719,18 +854,37 @@ define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
}
define <vscale x 1 x i64> @vremu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vremu_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: slli a0, a0, 61
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmulhu.vx v9, v8, a0
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v9, v9, a0
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v9
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv1i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmulhu.vv v9, v8, v9
; RV32-NEXT: addi a0, zero, 61
; RV32-NEXT: vsrl.vx v9, v9, a0
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv1i64_0:
; RV64: # %bb.0:
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: slli a0, a0, 61
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vmulhu.vx v9, v8, a0
; RV64-NEXT: addi a0, zero, 61
; RV64-NEXT: vsrl.vx v9, v9, a0
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = urem <vscale x 1 x i64> %va, %splat
@ -779,11 +933,24 @@ define <vscale x 2 x i64> @vremu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
}
define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vremu_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vremu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vremu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = urem <vscale x 2 x i64> %va, %splat
@ -791,18 +958,37 @@ define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
}
define <vscale x 2 x i64> @vremu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vremu_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: slli a0, a0, 61
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmulhu.vx v10, v8, a0
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v10, v10, a0
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v10
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv2i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmulhu.vv v10, v8, v10
; RV32-NEXT: addi a0, zero, 61
; RV32-NEXT: vsrl.vx v10, v10, a0
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv2i64_0:
; RV64: # %bb.0:
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: slli a0, a0, 61
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vmulhu.vx v10, v8, a0
; RV64-NEXT: addi a0, zero, 61
; RV64-NEXT: vsrl.vx v10, v10, a0
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v10
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = urem <vscale x 2 x i64> %va, %splat
@ -851,11 +1037,24 @@ define <vscale x 4 x i64> @vremu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
}
define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vremu_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vremu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vremu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = urem <vscale x 4 x i64> %va, %splat
@ -863,18 +1062,37 @@ define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
}
define <vscale x 4 x i64> @vremu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vremu_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: slli a0, a0, 61
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmulhu.vx v12, v8, a0
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v12, v12, a0
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v12
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv4i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmulhu.vv v12, v8, v12
; RV32-NEXT: addi a0, zero, 61
; RV32-NEXT: vsrl.vx v12, v12, a0
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv4i64_0:
; RV64: # %bb.0:
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: slli a0, a0, 61
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vmulhu.vx v12, v8, a0
; RV64-NEXT: addi a0, zero, 61
; RV64-NEXT: vsrl.vx v12, v12, a0
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v12
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = urem <vscale x 4 x i64> %va, %splat
@ -923,11 +1141,24 @@ define <vscale x 8 x i64> @vremu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
}
define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vremu_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vremu.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vremu.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = urem <vscale x 8 x i64> %va, %splat
@ -935,18 +1166,37 @@ define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
}
define <vscale x 8 x i64> @vremu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vremu_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: slli a0, a0, 61
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmulhu.vx v16, v8, a0
; CHECK-NEXT: addi a0, zero, 61
; CHECK-NEXT: vsrl.vx v16, v16, a0
; CHECK-NEXT: addi a0, zero, -7
; CHECK-NEXT: vnmsac.vx v8, a0, v16
; CHECK-NEXT: ret
; RV32-LABEL: vremu_vi_nxv8i64_0:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lui a0, 131072
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: addi a0, zero, 1
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmulhu.vv v16, v8, v16
; RV32-NEXT: addi a0, zero, 61
; RV32-NEXT: vsrl.vx v16, v16, a0
; RV32-NEXT: addi a0, zero, -7
; RV32-NEXT: vnmsac.vx v8, a0, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vi_nxv8i64_0:
; RV64: # %bb.0:
; RV64-NEXT: addi a0, zero, 1
; RV64-NEXT: slli a0, a0, 61
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vmulhu.vx v16, v8, a0
; RV64-NEXT: addi a0, zero, 61
; RV64-NEXT: vsrl.vx v16, v16, a0
; RV64-NEXT: addi a0, zero, -7
; RV64-NEXT: vnmsac.vx v8, a0, v16
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = urem <vscale x 8 x i64> %va, %splat

View File

@ -1,559 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vrsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i8> %splat, %va
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vrsub_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vrsub_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 -4, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i8> %splat, %va
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vrsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i8> %splat, %va
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vrsub_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vrsub_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 -4, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i8> %splat, %va
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vrsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i8> %splat, %va
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vrsub_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vrsub_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 -4, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i8> %splat, %va
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vrsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i8> %splat, %va
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vrsub_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vrsub_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -4, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i8> %splat, %va
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vrsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i8> %splat, %va
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vrsub_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vrsub_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 -4, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i8> %splat, %va
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vrsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = sub <vscale x 32 x i8> %splat, %va
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vrsub_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vrsub_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 -4, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = sub <vscale x 32 x i8> %splat, %va
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vrsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = sub <vscale x 64 x i8> %splat, %va
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vrsub_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vrsub_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 -4, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = sub <vscale x 64 x i8> %splat, %va
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vrsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i16> %splat, %va
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vrsub_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vrsub_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 -4, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i16> %splat, %va
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vrsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i16> %splat, %va
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vrsub_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vrsub_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 -4, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i16> %splat, %va
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vrsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i16> %splat, %va
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vrsub_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vrsub_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 -4, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i16> %splat, %va
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vrsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i16> %splat, %va
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vrsub_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vrsub_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -4, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i16> %splat, %va
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vrsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i16> %splat, %va
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vrsub_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vrsub_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 -4, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i16> %splat, %va
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vrsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = sub <vscale x 32 x i16> %splat, %va
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vrsub_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vrsub_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 -4, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = sub <vscale x 32 x i16> %splat, %va
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vrsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vrsub_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i32> %splat, %va
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vrsub_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vrsub_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 -4, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i32> %splat, %va
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vrsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vrsub_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i32> %splat, %va
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vrsub_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vrsub_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 -4, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i32> %splat, %va
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vrsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vrsub_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i32> %splat, %va
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vrsub_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vrsub_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 -4, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i32> %splat, %va
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vrsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vrsub_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i32> %splat, %va
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vrsub_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vrsub_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -4, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i32> %splat, %va
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vrsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vrsub_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i32> %splat, %va
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vrsub_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vrsub_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 -4, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i32> %splat, %va
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vrsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vrsub_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i64> %splat, %va
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vrsub_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vrsub_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 -4, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i64> %splat, %va
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vrsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vrsub_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsub.vv v8, v10, v8
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i64> %splat, %va
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vrsub_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vrsub_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 -4, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i64> %splat, %va
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vrsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vrsub_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsub.vv v8, v12, v8
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i64> %splat, %va
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vrsub_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vrsub_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 -4, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i64> %splat, %va
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vrsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vrsub_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsub.vv v8, v16, v8
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i64> %splat, %va
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vrsub_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vrsub_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v8, -4
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -4, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i64> %splat, %va
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vrsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrsub_vx_nxv1i8:
@ -434,11 +435,24 @@ define <vscale x 16 x i32> @vrsub_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
}
define <vscale x 1 x i64> @vrsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vrsub_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vrsub_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsub.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vrsub.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i64> %splat, %va
@ -458,11 +472,24 @@ define <vscale x 1 x i64> @vrsub_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
}
define <vscale x 2 x i64> @vrsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vrsub_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vrsub_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vrsub.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i64> %splat, %va
@ -482,11 +509,24 @@ define <vscale x 2 x i64> @vrsub_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
}
define <vscale x 4 x i64> @vrsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vrsub_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vrsub_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vrsub.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i64> %splat, %va
@ -506,11 +546,24 @@ define <vscale x 4 x i64> @vrsub_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
}
define <vscale x 8 x i64> @vrsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vrsub_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vrsub_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v16, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vrsub.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i64> %splat, %va

View File

@ -1,630 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vshl_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vshl_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vshl_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vshl_vx_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vshl_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vshl_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vshl_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vshl_vx_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vshl_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vshl_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vshl_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vshl_vx_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vshl_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vshl_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vshl_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vshl_vx_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vshl_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vshl_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = shl <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vshl_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vshl_vx_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = shl <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vshl_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vshl_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = shl <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vshl_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vshl_vx_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = shl <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vshl_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vshl_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = shl <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vshl_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vshl_vx_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = shl <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vshl_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vshl_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vshl_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vshl_vx_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vshl_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vshl_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vshl_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vshl_vx_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vshl_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vshl_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vshl_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vshl_vx_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vshl_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vshl_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vshl_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vshl_vx_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vshl_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vshl_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = shl <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vshl_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vshl_vx_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = shl <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vshl_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vshl_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = shl <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vshl_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vshl_vx_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = shl <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vshl_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vshl_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vshl_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vshl_vx_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vshl_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vshl_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vshl_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vshl_vx_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vshl_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vshl_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vshl_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vshl_vx_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vshl_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vshl_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vshl_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vshl_vx_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vshl_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vshl_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = shl <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vshl_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vshl_vx_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = shl <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vshl_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vshl_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vshl_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vshl_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv1i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vshl_vx_nxv1i64_2(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv1i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = shl <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vshl_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vshl_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vshl_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vshl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv2i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vshl_vx_nxv2i64_2(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv2i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = shl <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vshl_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vshl_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vshl_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vshl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv4i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vshl_vx_nxv4i64_2(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv4i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = shl <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vshl_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vshl_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vshl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsll.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vshl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vshl_vx_nxv8i64_2(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vshl_vx_nxv8i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = shl <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,4 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vshl_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {

View File

@ -1,803 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vsra_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 1 x i8> %va, %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vsra_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vsra_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vsra_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 2 x i8> %va, %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vsra_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vsra_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vsra_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 4 x i8> %va, %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vsra_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vsra_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vsra_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 8 x i8> %va, %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vsra_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vsra_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vsra_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = ashr <vscale x 16 x i8> %va, %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vsra_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = ashr <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vsra_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = ashr <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vsra_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = ashr <vscale x 32 x i8> %va, %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vsra_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = ashr <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vsra_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = ashr <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vsra_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = ashr <vscale x 64 x i8> %va, %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vsra_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = ashr <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vsra_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = ashr <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vsra_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 1 x i16> %va, %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vsra_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vsra_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vsra_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 2 x i16> %va, %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vsra_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vsra_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vsra_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 4 x i16> %va, %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vsra_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vsra_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vsra_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = ashr <vscale x 8 x i16> %va, %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vsra_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vsra_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vsra_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = ashr <vscale x 16 x i16> %va, %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vsra_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = ashr <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vsra_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = ashr <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vsra_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = ashr <vscale x 32 x i16> %va, %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vsra_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = ashr <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vsra_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = ashr <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vsra_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 1 x i32> %va, %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vsra_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vsra_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vsra_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vsra_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 2 x i32> %va, %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vsra_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vsra_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vsra_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vsra_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = ashr <vscale x 4 x i32> %va, %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vsra_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vsra_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vsra_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vsra_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = ashr <vscale x 8 x i32> %va, %vb
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vsra_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vsra_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vsra_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vsra_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = ashr <vscale x 16 x i32> %va, %vb
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vsra_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vsra_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = ashr <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vsra_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = ashr <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vsra_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = ashr <vscale x 1 x i64> %va, %vb
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vsra_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vsra_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vsra_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv1i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = ashr <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vsra_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = ashr <vscale x 2 x i64> %va, %vb
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vsra_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vsra_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vsra_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv2i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = ashr <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vsra_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = ashr <vscale x 4 x i64> %va, %vb
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vsra_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vsra_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vsra_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv4i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = ashr <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vsra_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsra.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = ashr <vscale x 8 x i64> %va, %vb
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vsra_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vsra_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsra.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vsra_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = ashr <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,4 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vsra_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {

View File

@ -1,583 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vsrl_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vsrl_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsrl_vx_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vsrl_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vsrl_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsrl_vx_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vsrl_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vsrl_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsrl_vx_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vsrl_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vsrl_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsrl_vx_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vsrl_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = lshr <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vsrl_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vsrl_vx_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = lshr <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vsrl_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = lshr <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vsrl_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vsrl_vx_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = lshr <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vsrl_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = lshr <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vsrl_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vsrl_vx_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 6, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = lshr <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vsrl_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vsrl_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vsrl_vx_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vsrl_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vsrl_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vsrl_vx_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vsrl_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vsrl_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vsrl_vx_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vsrl_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vsrl_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vsrl_vx_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vsrl_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = lshr <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vsrl_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vsrl_vx_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = lshr <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vsrl_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsrl_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = lshr <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vsrl_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vsrl_vx_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 6
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 6, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = lshr <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vsrl_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vsrl_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vsrl_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vsrl_vx_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vsrl_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vsrl_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vsrl_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vsrl_vx_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vsrl_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vsrl_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vsrl_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vsrl_vx_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vsrl_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vsrl_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vsrl_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vsrl_vx_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vsrl_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vsrl_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = lshr <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vsrl_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vsrl_vx_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 31, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = lshr <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vsrl_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vsrl_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vsrl_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vsrl_vx_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vsrl_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vsrl_vx_nxv1i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = lshr <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vsrl_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vsrl_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vsrl_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsrl_vx_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vsrl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsrl_vx_nxv2i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = lshr <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vsrl_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vsrl_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vsrl_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsrl_vx_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vsrl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsrl_vx_nxv4i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = lshr <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vsrl_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vsrl_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vsrl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsrl_vx_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsrl.vi v8, v8, 31
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 31, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vsrl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsrl_vx_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 32, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = lshr <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,4 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vsrl_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {

View File

@ -1,816 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vsub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 1 x i8> %va, %vb
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
define <vscale x 1 x i8> @vsub_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i8> %va, %splat
ret <vscale x 1 x i8> %vc
}
; Test constant subs to see if we can optimize them away for scalable vectors.
define <vscale x 1 x i8> @vsub_ii_nxv1i8_1() {
; CHECK-LABEL: vsub_ii_nxv1i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: ret
%heada = insertelement <vscale x 1 x i8> undef, i8 2, i32 0
%splata = shufflevector <vscale x 1 x i8> %heada, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%headb = insertelement <vscale x 1 x i8> undef, i8 3, i32 0
%splatb = shufflevector <vscale x 1 x i8> %headb, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i8> %splata, %splatb
ret <vscale x 1 x i8> %vc
}
define <vscale x 2 x i8> @vsub_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 2 x i8> %va, %vb
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 2 x i8> @vsub_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i8> %va, %splat
ret <vscale x 2 x i8> %vc
}
define <vscale x 4 x i8> @vsub_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 4 x i8> %va, %vb
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 4 x i8> @vsub_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i8> %va, %splat
ret <vscale x 4 x i8> %vc
}
define <vscale x 8 x i8> @vsub_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 8 x i8> %va, %vb
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 8 x i8> @vsub_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i8> %va, %splat
ret <vscale x 8 x i8> %vc
}
define <vscale x 16 x i8> @vsub_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = sub <vscale x 16 x i8> %va, %vb
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 16 x i8> @vsub_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i8> %va, %splat
ret <vscale x 16 x i8> %vc
}
define <vscale x 32 x i8> @vsub_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = sub <vscale x 32 x i8> %va, %vb
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = sub <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 32 x i8> @vsub_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
%vc = sub <vscale x 32 x i8> %va, %splat
ret <vscale x 32 x i8> %vc
}
define <vscale x 64 x i8> @vsub_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = sub <vscale x 64 x i8> %va, %vb
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsub_vx_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = sub <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 64 x i8> @vsub_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vsub_vx_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 64 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
%vc = sub <vscale x 64 x i8> %va, %splat
ret <vscale x 64 x i8> %vc
}
define <vscale x 1 x i16> @vsub_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vsub_vv_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 1 x i16> %va, %vb
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsub_vx_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 1 x i16> @vsub_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vsub_vx_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 1, i32 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i16> %va, %splat
ret <vscale x 1 x i16> %vc
}
define <vscale x 2 x i16> @vsub_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vsub_vv_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 2 x i16> %va, %vb
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsub_vx_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 2 x i16> @vsub_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vsub_vx_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 1, i32 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i16> %va, %splat
ret <vscale x 2 x i16> %vc
}
define <vscale x 4 x i16> @vsub_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vsub_vv_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 4 x i16> %va, %vb
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsub_vx_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 4 x i16> @vsub_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vsub_vx_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 1, i32 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i16> %va, %splat
ret <vscale x 4 x i16> %vc
}
define <vscale x 8 x i16> @vsub_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vsub_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = sub <vscale x 8 x i16> %va, %vb
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsub_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 8 x i16> @vsub_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vsub_vx_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i16> %va, %splat
ret <vscale x 8 x i16> %vc
}
define <vscale x 16 x i16> @vsub_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vsub_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = sub <vscale x 16 x i16> %va, %vb
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsub_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 16 x i16> @vsub_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vsub_vx_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i16> undef, i16 1, i32 0
%splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i16> %va, %splat
ret <vscale x 16 x i16> %vc
}
define <vscale x 32 x i16> @vsub_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vsub_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = sub <vscale x 32 x i16> %va, %vb
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsub_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = sub <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 32 x i16> @vsub_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vsub_vx_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i16> undef, i16 1, i32 0
%splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
%vc = sub <vscale x 32 x i16> %va, %splat
ret <vscale x 32 x i16> %vc
}
define <vscale x 1 x i32> @vsub_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vsub_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 1 x i32> %va, %vb
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vsub_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 1 x i32> @vsub_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vsub_vx_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i32> %va, %splat
ret <vscale x 1 x i32> %vc
}
define <vscale x 2 x i32> @vsub_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vsub_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 2 x i32> %va, %vb
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vsub_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 2 x i32> @vsub_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vsub_vx_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i32> %va, %splat
ret <vscale x 2 x i32> %vc
}
define <vscale x 4 x i32> @vsub_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vsub_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = sub <vscale x 4 x i32> %va, %vb
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vsub_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 4 x i32> @vsub_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vsub_vx_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i32> %va, %splat
ret <vscale x 4 x i32> %vc
}
define <vscale x 8 x i32> @vsub_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vsub_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = sub <vscale x 8 x i32> %va, %vb
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vsub_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 8 x i32> @vsub_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vsub_vx_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i32> %va, %splat
ret <vscale x 8 x i32> %vc
}
define <vscale x 16 x i32> @vsub_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vsub_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = sub <vscale x 16 x i32> %va, %vb
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vsub_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 16 x i32> @vsub_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vsub_vx_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
%vc = sub <vscale x 16 x i32> %va, %splat
ret <vscale x 16 x i32> %vc
}
define <vscale x 1 x i64> @vsub_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vsub_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: ret
%vc = sub <vscale x 1 x i64> %va, %vb
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vsub_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 1 x i64> @vsub_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vsub_vx_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i64> %va, %splat
ret <vscale x 1 x i64> %vc
}
define <vscale x 2 x i64> @vsub_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vsub_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: ret
%vc = sub <vscale x 2 x i64> %va, %vb
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vsub_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 2 x i64> @vsub_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsub_vx_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i64> %va, %splat
ret <vscale x 2 x i64> %vc
}
define <vscale x 4 x i64> @vsub_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vsub_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: ret
%vc = sub <vscale x 4 x i64> %va, %vb
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vsub_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 4 x i64> @vsub_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsub_vx_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i64> %va, %splat
ret <vscale x 4 x i64> %vc
}
define <vscale x 8 x i64> @vsub_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vsub_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vsub.vv v8, v8, v16
; CHECK-NEXT: ret
%vc = sub <vscale x 8 x i64> %va, %vb
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vsub_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsub.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}
define <vscale x 8 x i64> @vsub_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsub_vx_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 1
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i64> %va, %splat
ret <vscale x 8 x i64> %vc
}

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vsub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vsub_vv_nxv1i8:
@ -657,11 +658,24 @@ define <vscale x 1 x i64> @vsub_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vsub_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vsub_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vsub.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = sub <vscale x 1 x i64> %va, %splat
@ -692,11 +706,24 @@ define <vscale x 2 x i64> @vsub_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vsub_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vsub_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vsub.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = sub <vscale x 2 x i64> %va, %splat
@ -727,11 +754,24 @@ define <vscale x 4 x i64> @vsub_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vsub_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vsub_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vsub.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = sub <vscale x 4 x i64> %va, %splat
@ -762,11 +802,24 @@ define <vscale x 8 x i64> @vsub_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vsub_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vsub_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsub.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = sub <vscale x 8 x i64> %va, %splat

View File

@ -1,315 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %tvec
}
define <vscale x 2 x i8> @vtrunc_nxv2i16_nxv2i8(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %tvec
}
define <vscale x 4 x i8> @vtrunc_nxv4i16_nxv4i8(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %tvec
}
define <vscale x 8 x i8> @vtrunc_nxv8i16_nxv8i8(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %tvec
}
define <vscale x 16 x i8> @vtrunc_nxv16i16_nxv16i8(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %tvec
}
define <vscale x 1 x i8> @vtrunc_nxv1i32_nxv1i8(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %tvec
}
define <vscale x 1 x i16> @vtrunc_nxv1i32_nxv1i16(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %tvec
}
define <vscale x 2 x i8> @vtrunc_nxv2i32_nxv2i8(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %tvec
}
define <vscale x 2 x i16> @vtrunc_nxv2i32_nxv2i16(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %tvec
}
define <vscale x 4 x i8> @vtrunc_nxv4i32_nxv4i8(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %tvec
}
define <vscale x 4 x i16> @vtrunc_nxv4i32_nxv4i16(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %tvec
}
define <vscale x 8 x i8> @vtrunc_nxv8i32_nxv8i8(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %tvec
}
define <vscale x 8 x i16> @vtrunc_nxv8i32_nxv8i16(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %tvec
}
define <vscale x 16 x i8> @vtrunc_nxv16i32_nxv16i8(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %tvec
}
define <vscale x 16 x i16> @vtrunc_nxv16i32_nxv16i16(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
ret <vscale x 16 x i16> %tvec
}
define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %tvec
}
define <vscale x 1 x i16> @vtrunc_nxv1i64_nxv1i16(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %tvec
}
define <vscale x 1 x i32> @vtrunc_nxv1i64_nxv1i32(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
ret <vscale x 1 x i32> %tvec
}
define <vscale x 2 x i8> @vtrunc_nxv2i64_nxv2i8(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %tvec
}
define <vscale x 2 x i16> @vtrunc_nxv2i64_nxv2i16(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %tvec
}
define <vscale x 2 x i32> @vtrunc_nxv2i64_nxv2i32(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
ret <vscale x 2 x i32> %tvec
}
define <vscale x 4 x i8> @vtrunc_nxv4i64_nxv4i8(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %tvec
}
define <vscale x 4 x i16> @vtrunc_nxv4i64_nxv4i16(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %tvec
}
define <vscale x 4 x i32> @vtrunc_nxv4i64_nxv4i32(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
ret <vscale x 4 x i32> %tvec
}
define <vscale x 8 x i8> @vtrunc_nxv8i64_nxv8i8(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %tvec
}
define <vscale x 8 x i16> @vtrunc_nxv8i64_nxv8i16(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %tvec
}
define <vscale x 8 x i32> @vtrunc_nxv8i64_nxv8i32(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>
ret <vscale x 8 x i32> %tvec
}

View File

@ -1,4 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 1 x i8> @vxor_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vxor_vv_nxv1i8:
@ -1074,11 +1075,24 @@ define <vscale x 1 x i64> @vxor_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
}
define <vscale x 1 x i64> @vxor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vxor_vx_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vxor.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vxor_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV64-NEXT: vxor.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
%vc = xor <vscale x 1 x i64> %va, %splat
@ -1133,11 +1147,24 @@ define <vscale x 2 x i64> @vxor_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
}
define <vscale x 2 x i64> @vxor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vxor_vx_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vxor.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vxor_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV64-NEXT: vxor.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
%vc = xor <vscale x 2 x i64> %va, %splat
@ -1192,11 +1219,24 @@ define <vscale x 4 x i64> @vxor_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
}
define <vscale x 4 x i64> @vxor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vxor_vx_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vxor.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vxor_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV64-NEXT: vxor.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
%vc = xor <vscale x 4 x i64> %va, %splat
@ -1251,11 +1291,24 @@ define <vscale x 8 x i64> @vxor_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
}
define <vscale x 8 x i64> @vxor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vxor_vx_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vxor.vx v8, v8, a0
; CHECK-NEXT: ret
; RV32-LABEL: vxor_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vxor.vx v8, v8, a0
; RV64-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
%vc = xor <vscale x 8 x i64> %va, %splat